From 95a9c3d2880aa310d96ebe8d664815c0f033964f Mon Sep 17 00:00:00 2001 From: Aatman Date: Tue, 30 Aug 2022 14:25:11 +0530 Subject: [PATCH] feat: upgrade kube2pulumi to v0.0.11 (#10) chore(docs): update pulumi kubernetes version --- README.md | 2 +- go.mod | 3 +- go.sum | 160 +- pkg/kube2cdk8s/kube2cdk8s.go | 9 +- util/util.go | 3 +- vendor/github.com/gofrs/flock/.gitignore | 24 - vendor/github.com/gofrs/flock/.travis.yml | 10 - vendor/github.com/gofrs/flock/LICENSE | 27 - vendor/github.com/gofrs/flock/README.md | 41 - vendor/github.com/gofrs/flock/appveyor.yml | 25 - vendor/github.com/gofrs/flock/flock.go | 127 - vendor/github.com/gofrs/flock/flock_unix.go | 195 -- vendor/github.com/gofrs/flock/flock_winapi.go | 76 - .../github.com/gofrs/flock/flock_windows.go | 140 - .../pkg/kube2pulumi/kube2pulumi.go | 13 +- .../kube2pulumi/pkg/pcl2pulumi/pcl2pulumi.go | 6 +- .../kube2pulumi/pkg/yaml2pcl/yaml2pcl.go | 18 +- .../pulumi/pulumi/pkg/v3/codegen/docs.go | 6 +- .../pulumi/pkg/v3/codegen/dotnet/doc.go | 42 +- .../pulumi/pkg/v3/codegen/dotnet/gen.go | 1007 ++++-- .../pkg/v3/codegen/dotnet/gen_program.go | 110 +- .../codegen/dotnet/gen_program_expressions.go | 117 +- .../pulumi/pkg/v3/codegen/dotnet/importer.go | 17 + .../pulumi/pkg/v3/codegen/dotnet/templates.go | 50 +- .../pulumi/pkg/v3/codegen/dotnet/utilities.go | 32 +- .../pulumi/pulumi/pkg/v3/codegen/go/doc.go | 50 +- .../pulumi/pulumi/pkg/v3/codegen/go/gen.go | 2723 +++++++++++---- .../pkg/v3/codegen/go/gen_crd2pulumi.go | 8 +- .../pulumi/pkg/v3/codegen/go/gen_program.go | 246 +- .../v3/codegen/go/gen_program_expressions.go | 536 +-- .../pkg/v3/codegen/go/gen_program_inputs.go | 116 - .../pkg/v3/codegen/go/gen_program_json.go | 16 +- .../v3/codegen/go/gen_program_optionals.go | 155 +- .../pkg/v3/codegen/go/gen_program_utils.go | 41 + .../pulumi/pkg/v3/codegen/go/gen_spill.go | 74 + .../pulumi/pkg/v3/codegen/go/importer.go | 28 +- .../pulumi/pkg/v3/codegen/go/utilities.go | 3 +- 
.../pulumi/pkg/v3/codegen/hcl2/invoke.go | 113 - .../pkg/v3/codegen/hcl2/model/diagnostics.go | 21 +- .../pkg/v3/codegen/hcl2/model/expression.go | 14 +- .../pkg/v3/codegen/hcl2/model/format/func.go | 4 +- .../pulumi/pkg/v3/codegen/hcl2/model/type.go | 33 +- .../v3/codegen/hcl2/model/type_collection.go | 4 +- .../pkg/v3/codegen/hcl2/model/type_const.go | 23 +- .../v3/codegen/hcl2/model/type_eventuals.go | 10 +- .../pkg/v3/codegen/hcl2/model/type_list.go | 22 +- .../pkg/v3/codegen/hcl2/model/type_map.go | 20 +- .../pkg/v3/codegen/hcl2/model/type_none.go | 9 +- .../pkg/v3/codegen/hcl2/model/type_object.go | 37 +- .../pkg/v3/codegen/hcl2/model/type_opaque.go | 70 +- .../pkg/v3/codegen/hcl2/model/type_output.go | 16 +- .../pkg/v3/codegen/hcl2/model/type_promise.go | 11 +- .../pkg/v3/codegen/hcl2/model/type_set.go | 26 +- .../pkg/v3/codegen/hcl2/model/type_tuple.go | 48 +- .../pkg/v3/codegen/hcl2/model/type_union.go | 42 +- .../pkg/v3/codegen/hcl2/syntax/utilities.go | 4 +- .../v3/codegen/internal/tstypes/tstypes.go | 222 ++ .../pulumi/pkg/v3/codegen/nodejs/doc.go | 34 +- .../pulumi/pkg/v3/codegen/nodejs/gen.go | 1011 ++++-- .../pkg/v3/codegen/nodejs/gen_intrinsics.go | 4 +- .../pkg/v3/codegen/nodejs/gen_program.go | 72 +- .../codegen/nodejs/gen_program_expressions.go | 81 +- .../v3/codegen/nodejs/gen_program_lower.go | 8 +- .../v3/codegen/nodejs/gen_program_utils.go | 17 + .../pulumi/pkg/v3/codegen/nodejs/importer.go | 17 + .../pulumi/pkg/v3/codegen/nodejs/utilities.go | 4 +- .../pkg/v3/codegen/{hcl2 => pcl}/binder.go | 29 +- .../v3/codegen/{hcl2 => pcl}/binder_nodes.go | 2 +- .../codegen/{hcl2 => pcl}/binder_resource.go | 8 +- .../v3/codegen/{hcl2 => pcl}/binder_schema.go | 67 +- .../pkg/v3/codegen/{hcl2 => pcl}/component.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/config.go | 2 +- .../v3/codegen/{hcl2 => pcl}/diagnostics.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/functions.go | 43 +- .../v3/codegen/{hcl2 => pcl}/intrinsics.go | 4 +- 
.../pulumi/pkg/v3/codegen/pcl/invoke.go | 232 ++ .../pkg/v3/codegen/{hcl2 => pcl}/local.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/output.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/program.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/resource.go | 2 +- .../v3/codegen/{hcl2 => pcl}/rewrite_apply.go | 2 +- .../codegen/{hcl2 => pcl}/rewrite_convert.go | 8 +- .../{hcl2 => pcl}/rewrite_properties.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/type.go | 2 +- .../pkg/v3/codegen/{hcl2 => pcl}/utilities.go | 2 +- .../pulumi/pkg/v3/codegen/python/.gitignore | 1 + .../pulumi/pkg/v3/codegen/python/doc.go | 29 +- .../pulumi/pkg/v3/codegen/python/gen.go | 1164 ++++--- .../pkg/v3/codegen/python/gen_program.go | 125 +- .../codegen/python/gen_program_expressions.go | 73 +- .../v3/codegen/python/gen_program_lower.go | 27 +- .../v3/codegen/python/gen_program_quotes.go | 24 +- .../v3/codegen/python/gen_program_utils.go | 17 + .../codegen/python/gen_resource_mappings.go | 14 +- .../pulumi/pkg/v3/codegen/python/importer.go | 13 +- .../pulumi/pkg/v3/codegen/python/python.go | 6 + .../pkg/v3/codegen/python/requirements.txt | 1 + .../pulumi/pkg/v3/codegen/python/utilities.go | 104 +- .../pulumi/pkg/v3/codegen/schema/loader.go | 78 +- .../pulumi/pkg/v3/codegen/schema/pulumi.json | 531 +++ .../pulumi/pkg/v3/codegen/schema/schema.go | 1801 ++++++++-- .../pulumi/pulumi/pkg/v3/codegen/utilities.go | 31 + .../pulumi/pkg/v3/codegen/utilities_types.go | 185 +- .../pulumi/sdk/v3/go/common/apitype/core.go | 41 +- .../sdk/v3/go/common/apitype/deployments.json | 173 + .../pulumi/sdk/v3/go/common/apitype/plan.go | 88 + .../v3/go/common/apitype/property-values.json | 231 ++ .../sdk/v3/go/common/apitype/resources.json | 127 + .../pulumi/sdk/v3/go/common/apitype/stacks.go | 23 + .../sdk/v3/go/common/diag/colors/colors.go | 6 +- .../sdk/v3/go/common/diag/colors/diag.go | 4 +- .../pulumi/sdk/v3/go/common/diag/errors.go | 6 +- .../pulumi/sdk/v3/go/common/diag/sink.go | 3 - .../sdk/v3/go/common/encoding/marshal.go 
| 12 +- .../sdk/v3/go/common/resource/config/crypt.go | 139 +- .../sdk/v3/go/common/resource/config/key.go | 2 +- .../resource/plugin/langruntime_plugin.go | 42 +- .../v3/go/common/resource/plugin/plugin.go | 92 +- .../plugin/provider-implementers-guide.md | 617 ---- .../v3/go/common/resource/plugin/provider.go | 145 +- .../common/resource/plugin/provider_plugin.go | 199 +- .../common/resource/plugin/provider_server.go | 95 +- .../plugin/resource_lifecycle.mermaid | 47 - .../resource/plugin/resource_lifecycle.svg | 1 - .../sdk/v3/go/common/resource/plugin/rpc.go | 173 +- .../sdk/v3/go/common/resource/properties.go | 39 +- .../v3/go/common/resource/properties_diff.go | 310 ++ .../v3/go/common/resource/properties_path.go | 87 +- .../v3/go/common/resource/resource_goal.go | 7 +- .../sdk/v3/go/common/resource/resource_id.go | 46 +- .../v3/go/common/resource/resource_state.go | 10 +- .../pulumi/sdk/v3/go/common/tokens/names.go | 52 +- .../pulumi/sdk/v3/go/common/tokens/tokens.go | 2 +- .../sdk/v3/go/common/util/ciutil/buildkite.go | 56 + .../sdk/v3/go/common/util/ciutil/detect.go | 8 +- .../sdk/v3/go/common/util/cmdutil/child.go | 1 + .../sdk/v3/go/common/util/cmdutil/console.go | 86 +- .../sdk/v3/go/common/util/cmdutil/trace.go | 193 +- .../sdk/v3/go/common/util/fsutil/lock.go | 24 +- .../sdk/v3/go/common/util/gitutil/git.go | 63 +- .../sdk/v3/go/common/util/httputil/http.go | 42 +- .../sdk/v3/go/common/util/logging/log.go | 13 +- .../sdk/v3/go/common/util/result/result.go | 14 + .../v3/go/common/util/rpcutil/interceptor.go | 20 +- .../sdk/v3/go/common/util/rpcutil/serve.go | 5 +- .../sdk/v3/go/common/workspace/creds.go | 121 +- .../sdk/v3/go/common/workspace/loaders.go | 8 +- .../sdk/v3/go/common/workspace/paths.go | 3 +- .../sdk/v3/go/common/workspace/plugins.go | 191 +- .../sdk/v3/go/common/workspace/project.go | 9 +- .../sdk/v3/go/common/workspace/templates.go | 31 +- .../sdk/v3/go/common/workspace/workspace.go | 31 +- .../pulumi/sdk/v3/proto/go/provider.pb.go | 
616 +++- .../pulumi/sdk/v3/proto/go/resource.pb.go | 189 +- .../pulumi/pulumi/sdk/v3/python/.gitignore | 2 + .../pulumi/pulumi/sdk/v3/python/.pylintrc | 3 + .../pulumi/pulumi/sdk/v3/python/Makefile | 49 +- .../pulumi/pulumi/sdk/v3/python/Pipfile | 18 - .../pulumi/pulumi/sdk/v3/python/python.go | 19 +- .../pulumi/sdk/v3/python/requirements.txt | 21 + .../pulumi/pulumi/sdk/v3/python/shim_unix.go | 3 +- vendor/github.com/rivo/uniseg/LICENSE.txt | 21 + vendor/github.com/rivo/uniseg/README.md | 62 + vendor/github.com/rivo/uniseg/doc.go | 8 + vendor/github.com/rivo/uniseg/go.mod | 3 + vendor/github.com/rivo/uniseg/grapheme.go | 268 ++ vendor/github.com/rivo/uniseg/properties.go | 1658 +++++++++ .../github.com/rogpeppe/go-internal/LICENSE | 27 + .../internal/syscall/windows/mksyscall.go | 7 + .../internal/syscall/windows/psapi_windows.go | 20 + .../syscall/windows/reparse_windows.go | 64 + .../syscall/windows/security_windows.go | 128 + .../syscall/windows/symlink_windows.go | 39 + .../syscall/windows/syscall_windows.go | 307 ++ .../internal/syscall/windows/sysdll/sysdll.go | 28 + .../syscall/windows/zsyscall_windows.go | 363 ++ .../lockedfile/internal/filelock/filelock.go | 98 + .../internal/filelock/filelock_fcntl.go | 220 ++ .../internal/filelock/filelock_other.go | 37 + .../internal/filelock/filelock_plan9.go | 39 + .../internal/filelock/filelock_unix.go | 45 + .../internal/filelock/filelock_windows.go | 68 + .../go-internal/lockedfile/lockedfile.go | 122 + .../lockedfile/lockedfile_filelock.go | 65 + .../lockedfile/lockedfile_plan9.go | 94 + .../rogpeppe/go-internal/lockedfile/mutex.go | 60 + .../santhosh-tekuri/jsonschema/v5/.gitignore | 4 + .../santhosh-tekuri/jsonschema/v5/LICENSE | 175 + .../santhosh-tekuri/jsonschema/v5/README.md | 208 ++ .../santhosh-tekuri/jsonschema/v5/compiler.go | 696 ++++ .../santhosh-tekuri/jsonschema/v5/content.go | 29 + .../santhosh-tekuri/jsonschema/v5/doc.go | 52 + .../santhosh-tekuri/jsonschema/v5/draft.go | 1373 ++++++++ 
.../santhosh-tekuri/jsonschema/v5/errors.go | 214 ++ .../jsonschema/v5/extension.go | 116 + .../santhosh-tekuri/jsonschema/v5/format.go | 537 +++ .../santhosh-tekuri/jsonschema/v5/go.mod | 3 + .../santhosh-tekuri/jsonschema/v5/loader.go | 60 + .../santhosh-tekuri/jsonschema/v5/output.go | 77 + .../santhosh-tekuri/jsonschema/v5/resource.go | 272 ++ .../santhosh-tekuri/jsonschema/v5/schema.go | 793 +++++ vendor/golang.org/x/sys/unix/ifreq_linux.go | 109 + vendor/golang.org/x/sys/unix/ioctl_linux.go | 78 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 2 - vendor/golang.org/x/sys/unix/syscall_linux.go | 17 +- .../x/sys/unix/syscall_linux_386.go | 4 +- .../x/sys/unix/syscall_linux_arm.go | 4 +- .../x/sys/unix/syscall_linux_arm64.go | 4 +- .../x/sys/unix/syscall_linux_mipsx.go | 4 +- .../x/sys/unix/syscall_linux_ppc.go | 4 +- .../golang.org/x/sys/unix/syscall_solaris.go | 240 ++ vendor/golang.org/x/sys/unix/syscall_unix.go | 4 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 34 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 3 + .../x/sys/unix/zerrors_openbsd_arm.go | 3 + .../golang.org/x/sys/unix/zsyscall_linux.go | 12 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 72 +- .../x/sys/unix/zsysnum_linux_386.go | 3 + .../x/sys/unix/zsysnum_linux_amd64.go | 711 ++-- .../x/sys/unix/zsysnum_linux_arm.go | 3 + .../x/sys/unix/zsysnum_linux_arm64.go | 601 ++-- 
.../x/sys/unix/zsysnum_linux_mips.go | 3 + .../x/sys/unix/zsysnum_linux_mips64.go | 697 ++-- .../x/sys/unix/zsysnum_linux_mips64le.go | 697 ++-- .../x/sys/unix/zsysnum_linux_mipsle.go | 3 + .../x/sys/unix/zsysnum_linux_ppc.go | 3 + .../x/sys/unix/zsysnum_linux_ppc64.go | 795 ++--- .../x/sys/unix/zsysnum_linux_ppc64le.go | 795 ++--- .../x/sys/unix/zsysnum_linux_riscv64.go | 599 ++-- .../x/sys/unix/zsysnum_linux_s390x.go | 725 ++-- .../x/sys/unix/zsysnum_linux_sparc64.go | 753 ++-- vendor/golang.org/x/sys/unix/ztypes_linux.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 5 + .../x/sys/unix/ztypes_linux_amd64.go | 5 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 5 + .../x/sys/unix/ztypes_linux_arm64.go | 5 + .../x/sys/unix/ztypes_linux_mips.go | 5 + .../x/sys/unix/ztypes_linux_mips64.go | 5 + .../x/sys/unix/ztypes_linux_mips64le.go | 5 + .../x/sys/unix/ztypes_linux_mipsle.go | 5 + .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 5 + .../x/sys/unix/ztypes_linux_ppc64.go | 5 + .../x/sys/unix/ztypes_linux_ppc64le.go | 5 + .../x/sys/unix/ztypes_linux_riscv64.go | 5 + .../x/sys/unix/ztypes_linux_s390x.go | 5 + .../x/sys/unix/ztypes_linux_sparc64.go | 5 + .../x/sys/unix/ztypes_solaris_amd64.go | 40 + .../golang.org/x/sys/windows/types_windows.go | 2 +- vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 ++++ vendor/gopkg.in/yaml.v3/decode.go | 950 ++++++ vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +++++++++++ vendor/gopkg.in/yaml.v3/encode.go | 577 ++++ vendor/gopkg.in/yaml.v3/go.mod | 5 + vendor/gopkg.in/yaml.v3/parserc.go | 1249 +++++++ vendor/gopkg.in/yaml.v3/readerc.go | 434 +++ vendor/gopkg.in/yaml.v3/resolve.go | 326 ++ vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++++++++++++++++ vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 698 ++++ vendor/gopkg.in/yaml.v3/yamlh.go | 807 +++++ 
vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 ++ vendor/modules.txt | 25 +- 280 files changed, 36361 insertions(+), 8244 deletions(-) delete mode 100644 vendor/github.com/gofrs/flock/.gitignore delete mode 100644 vendor/github.com/gofrs/flock/.travis.yml delete mode 100644 vendor/github.com/gofrs/flock/LICENSE delete mode 100644 vendor/github.com/gofrs/flock/README.md delete mode 100644 vendor/github.com/gofrs/flock/appveyor.yml delete mode 100644 vendor/github.com/gofrs/flock/flock.go delete mode 100644 vendor/github.com/gofrs/flock/flock_unix.go delete mode 100644 vendor/github.com/gofrs/flock/flock_winapi.go delete mode 100644 vendor/github.com/gofrs/flock/flock_windows.go delete mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_inputs.go create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_spill.go delete mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/invoke.go create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/internal/tstypes/tstypes.go create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_utils.go rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/binder.go (90%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/binder_nodes.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/binder_resource.go (97%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/binder_schema.go (84%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/component.go (98%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/config.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/diagnostics.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/functions.go (87%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/intrinsics.go (97%) create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/invoke.go 
rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/local.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/output.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/program.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/resource.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/rewrite_apply.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/rewrite_convert.go (98%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/rewrite_properties.go (99%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/type.go (98%) rename vendor/github.com/pulumi/pulumi/pkg/v3/codegen/{hcl2 => pcl}/utilities.go (99%) create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/.gitignore create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_utils.go create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/requirements.txt create mode 100644 vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/pulumi.json create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/deployments.json create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/plan.go create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/property-values.json create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/resources.json delete mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider-implementers-guide.md delete mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.mermaid delete mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.svg create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/buildkite.go delete mode 100644 
vendor/github.com/pulumi/pulumi/sdk/v3/python/Pipfile create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/python/requirements.txt create mode 100644 vendor/github.com/rivo/uniseg/LICENSE.txt create mode 100644 vendor/github.com/rivo/uniseg/README.md create mode 100644 vendor/github.com/rivo/uniseg/doc.go create mode 100644 vendor/github.com/rivo/uniseg/go.mod create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go create mode 100644 vendor/github.com/rivo/uniseg/properties.go create mode 100644 vendor/github.com/rogpeppe/go-internal/LICENSE create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go create mode 100644 vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_plan9.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go create 
mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go create mode 100644 vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/go.mod create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go create mode 100644 vendor/golang.org/x/sys/unix/ifreq_linux.go create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/go.mod create mode 100644 
vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go diff --git a/README.md b/README.md index 0598db2..0a49bd0 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Uses Pulumi's kube2pulumi as a base. ``` $ curl -fsSL https://get.pulumi.com | sh -$ pulumi plugin install resource kubernetes v2.4.2 +$ pulumi plugin install resource kubernetes v3.0.0 ``` ## Usage diff --git a/go.mod b/go.mod index b7f3c1c..93d456e 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,8 @@ go 1.16 require ( github.com/bradleyjkemp/cupaloy v2.3.0+incompatible - github.com/pulumi/kube2pulumi v0.0.10 + github.com/pulumi/kube2pulumi v0.0.11 github.com/spf13/cobra v1.0.0 github.com/spf13/viper v1.8.1 golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 // indirect - golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect ) diff --git a/go.sum b/go.sum index aff68d4..70d7ea4 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,7 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
@@ -33,6 +34,7 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.5.0/go.mod h1:c4nNYR1qdq7eaZ+jSc5fonrQN2k3M7sWATcYTiakjEo= +cloud.google.com/go/logging v1.0.0 h1:kaunpnoEh9L4hu6JUsBa8Y20LBfKnCuDhKUgdZp7oK8= cloud.google.com/go/logging v1.0.0/go.mod h1:V1cc3ogwobYzQq5f2R7DS/GvRIrI4FKj01Gs5glwAls= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= @@ -44,6 +46,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.15.0 h1:Ljj+ZXVEhCr/1+4ZhvtteN1ND7UUsNTlduGclLh8GO0= cloud.google.com/go/storage v1.15.0/go.mod h1:mjjQMoxxyGH7Jr8K5qrx6N2O0AHsczI61sMNn03GIZI= contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= @@ -51,33 +54,47 @@ contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0Wk dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlecAivazis/survey/v2 v2.0.5/go.mod h1:WYBhg6f0y/fNYUuesWQc0PKbJcEliGcYHB9sNT3Bg74= github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-pipeline-go v0.2.3 
h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v54.0.0+incompatible h1:Bq3L9LF0DHCexlT0fccwxgrOMfjHx8LGz+d+L7gGQv4= github.com/Azure/azure-sdk-for-go v54.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-service-bus-go v0.10.11/go.mod h1:AWw9eTTWZVZyvgpPahD1ybz3a8/vT3GsJDS8KYex55U= +github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= github.com/Azure/go-amqp v0.13.4/go.mod h1:wbpCKA8tR5MLgRyIu+bb+S6ECdIDdYJ0NlpFE9xsBPI= github.com/Azure/go-amqp v0.13.7/go.mod h1:wbpCKA8tR5MLgRyIu+bb+S6ECdIDdYJ0NlpFE9xsBPI= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= 
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.7 h1:8DQB8yl7aLQuP+nuR5e2RO6454OvFlSTXXaNHshc16s= github.com/Azure/go-autorest/autorest/azure/auth v0.5.7/go.mod h1:AkzUsqkrdmNhfP2i54HqINVQopw0CLDnvHpJ88Zz1eI= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= 
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -88,7 +105,7 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tT github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Sirupsen/logrus v1.0.5/go.mod h1:rmk17hk6i8ZSAJkSDa7nOxamrG+SP4P0mm+DAvExv4U= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= @@ -97,6 +114,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod 
h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= @@ -109,18 +127,22 @@ github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.38.35 h1:7AlAO0FC+8nFjxiGKEmq0QLpiA8/XFr6eIxgRTwkdTg= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= 
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bradleyjkemp/cupaloy v2.3.0+incompatible h1:UafIjBvWQmS9i/xRg+CamMrnLTKNzo+bdmT/oH34c2Y= github.com/bradleyjkemp/cupaloy v2.3.0+incompatible/go.mod h1:Au1Xw1sgaJ5iSFktEhYsS0dbQiS1B0/XMXl+42y9Ilk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -161,21 +183,26 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/djherbis/times v1.2.0 h1:xANXjsC/iBqbO00vkWlYwPWgBgEVU6m6AFYg0Pic+Mc= github.com/djherbis/times v1.2.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8= github.com/docker/distribution v2.7.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182 h1:Caj/qGJ9KyulC1WSksyPgp7r8+DKgTGfU39lmb2C5MQ= github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -189,16 +216,19 @@ github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 h1:Uc+IZ7gYqAf/rSGFplbWBSHaGolEQlNLgMgSE3ccnIQ= github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9yhSRvsmYyZsshflcR6ePWYLql6UU1amW13IM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -211,6 +241,7 @@ github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= @@ -229,20 +260,21 @@ github.com/goccy/go-yaml v1.8.0 h1:WCe9sBiI0oZb6EC6f3kq3dv0+aEiNdstT7b4xxq4MJQ= github.com/goccy/go-yaml v1.8.0/go.mod 
h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -269,8 +301,10 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -284,7 +318,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= 
github.com/google/go-replayers/grpcreplay v1.0.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= github.com/google/go-replayers/httpreplay v0.1.2/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= @@ -306,14 +342,20 @@ github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8= github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod 
h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -330,6 +372,7 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -347,11 +390,14 @@ github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -363,6 +409,7 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= @@ -371,34 +418,45 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/vault/api v1.0.5-0.20200519221902-385fac77e20f/go.mod h1:euTFbi2YJgwcju3imEt919lhJKF68nN1cQPq3aA+kBE= +github.com/hashicorp/vault/api v1.1.0 h1:QcxC7FuqEl0sZaIjcXB/kNEeBa0DH5z57qbWBvZwLC4= github.com/hashicorp/vault/api v1.1.0/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221530-14615acda45f/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod 
h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vault/sdk v0.2.0 h1:hvVswvMA9LvXwLBFDJLIoDBXi8hj90Q+gSS7vRYmLvQ= github.com/hashicorp/vault/sdk v0.2.0/go.mod h1:cAGI4nVnEfAyMeqt9oB+Mase8DNn3qA/LDNHURiwssY= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM= github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -406,16 +464,19 @@ 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= @@ -423,32 +484,39 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 
github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod 
h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= @@ -456,16 +524,21 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure 
v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxschmitt/golang-combinations v1.0.0/go.mod h1:RbMhWvfCelHR6WROvT2bVfxJvZHoEvBj71SKe+H0MYU= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -488,17 +561,22 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pgavlin/goldmark v1.1.33-0.20200616210433-b5eb04559386 h1:LoCV5cscNVWyK5ChN/uCoIFJz8jZD63VQiGJIRgr6uo= github.com/pgavlin/goldmark v1.1.33-0.20200616210433-b5eb04559386/go.mod h1:MRxHTJrf9FhdfNQ8Hdeh9gmHevC9RJE/fu8M3JIGjoE= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -522,52 +600,72 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pulumi/kube2pulumi v0.0.10 h1:p5OMO5GnzcwVXWwrFoPC9aeaN11SPIzjiwik1nmhDBU= -github.com/pulumi/kube2pulumi v0.0.10/go.mod h1:LraV28sSviILoyS2S+aHLUjecfGQ2CHiXQGiNYqpz6s= -github.com/pulumi/pulumi/pkg/v3 v3.4.0 h1:kFUq0uGumdNhOPymRUL+z9OmT7DyCWYc+Er7gLPzCUg= -github.com/pulumi/pulumi/pkg/v3 v3.4.0/go.mod h1:aAGoadWl60wVSE8Ig2FqcxUdfrmMKV6xfErcTOToIV4= -github.com/pulumi/pulumi/sdk/v3 v3.3.1/go.mod h1:GBHyQ7awNQSRmiKp/p8kIKrGrMOZeA/k2czoM/GOqds= -github.com/pulumi/pulumi/sdk/v3 v3.4.0 h1:9FTfaLXQ4B0w8rcCdtW10rbfHk6Uo5BHLnd5WK5yBh8= -github.com/pulumi/pulumi/sdk/v3 v3.4.0/go.mod h1:GBHyQ7awNQSRmiKp/p8kIKrGrMOZeA/k2czoM/GOqds= +github.com/pulumi/kube2pulumi v0.0.11 h1:Q51wPoYlnzK/9m/JZ+NcNM/eHsYCT0UvQMf3E8pqEEo= +github.com/pulumi/kube2pulumi v0.0.11/go.mod h1:Ov5s8e4a7NmR5F1BskKDloYO3YHnFJ13OAbi3qso4U8= +github.com/pulumi/pulumi/pkg/v3 v3.24.1 h1:5Jqu/eR4P8O0ST7ZA9VqxX7Vq23g5y3dp2K0Egiy8w0= +github.com/pulumi/pulumi/pkg/v3 v3.24.1/go.mod h1:3MYj+8Rmv0mWYWXEQ6z8TeoRxS7vnqow9o/c/tp6IHI= +github.com/pulumi/pulumi/sdk/v3 v3.24.0/go.mod h1:WHOQB00iuHZyXhwrymxpKXhpOahSguJIpRjVokmM11w= +github.com/pulumi/pulumi/sdk/v3 v3.24.1 h1:Ywaih9y/zBfS8s4w6BtcsBF/rHQpGbbZ82oXwEVUfVI= +github.com/pulumi/pulumi/sdk/v3 v3.24.1/go.mod h1:WHOQB00iuHZyXhwrymxpKXhpOahSguJIpRjVokmM11w= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= github.com/rjeczalik/notify 
v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0= github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ= +github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 h1:TToq11gyfNlrMFZiYujSekIsPd9AmsA2Bj/iv+s4JHE= +github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v3.21.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/httpfs 
v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -578,10 +676,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/texttheater/golang-levenshtein v0.0.0-20191208221605-eb6844b05fc6 h1:9VTskZOIRf2vKF3UL8TuWElry5pgUpV1tFSe/e/0m/E= github.com/texttheater/golang-levenshtein v0.0.0-20191208221605-eb6844b05fc6/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tklauser/go-sysconf v0.3.8/go.mod h1:z4zYWRS+X53WUKtBcmDg1comV3fPhdQnzasnIHUoLDU= +github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 h1:X9dsIWPuuEJlPX//UmRKophhOKCGXc46RVIGuttks68= @@ -595,9 +697,13 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -620,6 +726,7 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -632,7 +739,9 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +gocloud.dev v0.23.0 h1:u/6F8slWwaZPgGpjpNp0jzH+1P/M2ri7qEP3lFgbqBE= gocloud.dev v0.23.0/go.mod h1:zklCCIIo1N9ELkU2S2E7tW8P8eeMU7oGLeQCXdDwx9Q= +gocloud.dev/secrets/hashivault v0.23.0 h1:u9/KMKY44dy1iEpawBreRZWrQGxS8HdrqiyuBP6v7i4= gocloud.dev/secrets/hashivault v0.23.0/go.mod 
h1:JkedtcYw0IqNMru0glghf+dkoszG0WFjal3PCpucxBs= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -679,6 +788,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -690,6 +800,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -742,6 +853,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210420210106-798c2154c571/go.mod 
h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -756,6 +868,7 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c h1:SgVl/sCtkicsS7psKkje4H9YtjdEl3xsYh7N+5TDHqY= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -767,6 +880,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -837,8 +951,12 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6rNY2pbRfbIMVnepueo= +golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -847,11 +965,13 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -913,10 +1033,12 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -943,6 +1065,7 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= +google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU= google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -951,6 +1074,7 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1003,6 +1127,7 @@ google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2/go.mod h1:P3QM42oQ google.golang.org/genproto v0.0.0-20210423144448-3a41ef94ed2b/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210506142907-4a47615972c2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1028,6 +1153,7 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1040,13 +1166,16 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f h1:AQkMzsSzHWrgZWqGRpuRaRPDmyNibcXlpGcnQJ7HxZw= gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f/go.mod h1:CaHjv79TCgAvXMSFJSVgonHXYWxnhzI3eoHtnX5UgUo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1055,15 +1184,20 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= 
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1072,8 +1206,10 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1086,6 +1222,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/kube2cdk8s/kube2cdk8s.go b/pkg/kube2cdk8s/kube2cdk8s.go index 83a08c5..096a173 100644 --- a/pkg/kube2cdk8s/kube2cdk8s.go +++ b/pkg/kube2cdk8s/kube2cdk8s.go @@ -2,7 +2,6 @@ package kube2cdk8s import ( "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -14,13 +13,12 @@ import ( ) func Kube2CDK8S(filePath string) (string, error) { - - path, _, err := kube2pulumi.Kube2PulumiFile(filePath, "typescript") + path, _, err := kube2pulumi.Kube2PulumiFile(filePath, "", "typescript") if err != nil { return "", err } - input, err := ioutil.ReadFile(path) + input, err := os.ReadFile(path) if err != nil { return "", err } @@ -71,10 +69,9 @@ func Kube2CDK8S(filePath string) (string, error) { } func Kube2CDK8SMultiple(filePath string) (string, error) { - var result string - input, err := ioutil.ReadFile(filePath) + input, err := os.ReadFile(filePath) if err != nil { return "", err } diff --git a/util/util.go b/util/util.go index 3de508e..355f915 100644 --- a/util/util.go +++ b/util/util.go @@ -1,12 +1,11 @@ package 
util import ( - "io/ioutil" "os" ) func CreateTempFile(text []byte) (*os.File, error) { - tmpFile, err := ioutil.TempFile(os.TempDir(), "prefix-") + tmpFile, err := os.CreateTemp(os.TempDir(), "prefix-") if err != nil { return nil, err } diff --git a/vendor/github.com/gofrs/flock/.gitignore b/vendor/github.com/gofrs/flock/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/gofrs/flock/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml deleted file mode 100644 index b791a74..0000000 --- a/vendor/github.com/gofrs/flock/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.10.x - - 1.11.x -script: go test -v -check.vv -race ./... -sudo: false -notifications: - email: - on_success: never - on_failure: always diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE deleted file mode 100644 index aff7d35..0000000 --- a/vendor/github.com/gofrs/flock/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Tim Heckman -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -* Neither the name of linode-netint nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md deleted file mode 100644 index 7375e72..0000000 --- a/vendor/github.com/gofrs/flock/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# flock -[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) -[![GoDoc](https://img.shields.io/badge/godoc-go--flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) -[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) -[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) - -`flock` implements a thread-safe sync.Locker interface for file locking. It also -includes a non-blocking TryLock() function to allow locking without blocking execution. - -## License -`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. 
- -## Go Compatibility -This package makes use of the `context` package that was introduced in Go 1.7. As such, this -package has an implicit dependency on Go 1.7+. - -## Installation -``` -go get -u github.com/gofrs/flock -``` - -## Usage -```Go -import "github.com/gofrs/flock" - -fileLock := flock.New("/var/lock/go-lock.lock") - -locked, err := fileLock.TryLock() - -if err != nil { - // handle locking error -} - -if locked { - // do work - fileLock.Unlock() -} -``` - -For more detailed usage information take a look at the package API docs on -[GoDoc](https://godoc.org/github.com/gofrs/flock). diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml deleted file mode 100644 index 6848e94..0000000 --- a/vendor/github.com/gofrs/flock/appveyor.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: '{build}' - -build: false -deploy: false - -clone_folder: 'c:\gopath\src\github.com\gofrs\flock' - -environment: - GOPATH: 'c:\gopath' - GOVERSION: '1.11' - -init: - - git config --global core.autocrlf input - -install: - - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi - - msiexec /i go%GOVERSION%.windows-amd64.msi /q - - set Path=c:\go\bin;c:\gopath\bin;%Path% - - go version - - go env - -test_script: - - go get -t ./... - - go test -race -v ./... diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go deleted file mode 100644 index 8f109b8..0000000 --- a/vendor/github.com/gofrs/flock/flock.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -// Package flock implements a thread-safe interface for file locking. -// It also includes a non-blocking TryLock() function to allow locking -// without blocking execution. -// -// Package flock is released under the BSD 3-Clause License. 
See the LICENSE file -// for more details. -// -// While using this library, remember that the locking behaviors are not -// guaranteed to be the same on each platform. For example, some UNIX-like -// operating systems will transparently convert a shared lock to an exclusive -// lock. If you Unlock() the flock from a location where you believe that you -// have the shared lock, you may accidentally drop the exclusive lock. -package flock - -import ( - "context" - "os" - "sync" - "time" -) - -// Flock is the struct type to handle file locking. All fields are unexported, -// with access to some of the fields provided by getter methods (Path() and Locked()). -type Flock struct { - path string - m sync.RWMutex - fh *os.File - l bool - r bool -} - -// New returns a new instance of *Flock. The only parameter -// it takes is the path to the desired lockfile. -func New(path string) *Flock { - return &Flock{path: path} -} - -// NewFlock returns a new instance of *Flock. The only parameter -// it takes is the path to the desired lockfile. -// -// Deprecated: Use New instead. -func NewFlock(path string) *Flock { - return New(path) -} - -// Close is equivalent to calling Unlock. -// -// This will release the lock and close the underlying file descriptor. -// It will not remove the file from disk, that's up to your application. -func (f *Flock) Close() error { - return f.Unlock() -} - -// Path returns the path as provided in NewFlock(). -func (f *Flock) Path() string { - return f.path -} - -// Locked returns the lock state (locked: true, unlocked: false). -// -// Warning: by the time you use the returned value, the state may have changed. -func (f *Flock) Locked() bool { - f.m.RLock() - defer f.m.RUnlock() - return f.l -} - -// RLocked returns the read lock state (locked: true, unlocked: false). -// -// Warning: by the time you use the returned value, the state may have changed. 
-func (f *Flock) RLocked() bool { - f.m.RLock() - defer f.m.RUnlock() - return f.r -} - -func (f *Flock) String() string { - return f.path -} - -// TryLockContext repeatedly tries to take an exclusive lock until one of the -// conditions is met: TryLock succeeds, TryLock fails with error, or Context -// Done channel is closed. -func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { - return tryCtx(ctx, f.TryLock, retryDelay) -} - -// TryRLockContext repeatedly tries to take a shared lock until one of the -// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context -// Done channel is closed. -func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { - return tryCtx(ctx, f.TryRLock, retryDelay) -} - -func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { - if ctx.Err() != nil { - return false, ctx.Err() - } - for { - if ok, err := fn(); ok || err != nil { - return ok, err - } - select { - case <-ctx.Done(): - return false, ctx.Err() - case <-time.After(retryDelay): - // try again - } - } -} - -func (f *Flock) setFh() error { - // open a new os.File instance - // create it if it doesn't exist, and open the file read-only. - fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) - if err != nil { - return err - } - - // set the filehandle on the struct - f.fh = fh - return nil -} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go deleted file mode 100644 index 45f71a7..0000000 --- a/vendor/github.com/gofrs/flock/flock_unix.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -// +build !windows - -package flock - -import ( - "os" - "syscall" -) - -// Lock is a blocking call to try and take an exclusive file lock. 
It will wait -// until it is able to obtain the exclusive file lock. It's recommended that -// TryLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already exclusive-locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -// -// If the *Flock has a shared lock (RLock), this may transparently replace the -// shared lock with an exclusive lock on some UNIX-like operating systems. Be -// careful when using exclusive locks in conjunction with shared locks -// (RLock()), because calling Unlock() may accidentally release the exclusive -// lock that was once a shared lock. -func (f *Flock) Lock() error { - return f.lock(&f.l, syscall.LOCK_EX) -} - -// RLock is a blocking call to try and take a shared file lock. It will wait -// until it is able to obtain the shared file lock. It's recommended that -// TryRLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already shared-locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -func (f *Flock) RLock() error { - return f.lock(&f.r, syscall.LOCK_SH) -} - -func (f *Flock) lock(locked *bool, flag int) error { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return err - } - } - - if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { - shouldRetry, reopenErr := f.reopenFDOnError(err) - if reopenErr != nil { - return reopenErr - } - - if !shouldRetry { - return err - } - - if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil { - return err - } - } - - *locked = true - return nil -} - -// Unlock is a function to unlock the file. 
This file takes a RW-mutex lock, so -// while it is running the Locked() and RLocked() functions will be blocked. -// -// This function short-circuits if we are unlocked already. If not, it calls -// syscall.LOCK_UN on the file and closes the file descriptor. It does not -// remove the file from disk. It's up to your application to do. -// -// Please note, if your shared lock became an exclusive lock this may -// unintentionally drop the exclusive lock if called by the consumer that -// believes they have a shared lock. Please see Lock() for more details. -func (f *Flock) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - // if we aren't locked or if the lockfile instance is nil - // just return a nil error because we are unlocked - if (!f.l && !f.r) || f.fh == nil { - return nil - } - - // mark the file as unlocked - if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { - return err - } - - f.fh.Close() - - f.l = false - f.r = false - f.fh = nil - - return nil -} - -// TryLock is the preferred function for taking an exclusive file lock. This -// function takes an RW-mutex lock before it tries to lock the file, so there is -// the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the exclusive -// file lock, the function will return false instead of waiting for the lock. If -// we get the lock, we also set the *Flock instance as being exclusive-locked. -func (f *Flock) TryLock() (bool, error) { - return f.try(&f.l, syscall.LOCK_EX) -} - -// TryRLock is the preferred function for taking a shared file lock. This -// function takes an RW-mutex lock before it tries to lock the file, so there is -// the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. 
If we are unable to get the shared file -// lock, the function will return false instead of waiting for the lock. If we -// get the lock, we also set the *Flock instance as being share-locked. -func (f *Flock) TryRLock() (bool, error) { - return f.try(&f.r, syscall.LOCK_SH) -} - -func (f *Flock) try(locked *bool, flag int) (bool, error) { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return true, nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return false, err - } - } - - var retried bool -retry: - err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) - - switch err { - case syscall.EWOULDBLOCK: - return false, nil - case nil: - *locked = true - return true, nil - } - if !retried { - if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil { - return false, reopenErr - } else if shouldRetry { - retried = true - goto retry - } - } - - return false, err -} - -// reopenFDOnError determines whether we should reopen the file handle -// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c: -// Since Linux 3.4 (commit 55725513) -// Probably NFSv4 where flock() is emulated by fcntl(). -func (f *Flock) reopenFDOnError(err error) (bool, error) { - if err != syscall.EIO && err != syscall.EBADF { - return false, nil - } - if st, err := f.fh.Stat(); err == nil { - // if the file is able to be read and written - if st.Mode()&0600 == 0600 { - f.fh.Close() - f.fh = nil - - // reopen in read-write mode and set the filehandle - fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) - if err != nil { - return false, err - } - f.fh = fh - return true, nil - } - } - - return false, nil -} diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go deleted file mode 100644 index fe405a2..0000000 --- a/vendor/github.com/gofrs/flock/flock_winapi.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. 
-// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -// +build windows - -package flock - -import ( - "syscall" - "unsafe" -) - -var ( - kernel32, _ = syscall.LoadLibrary("kernel32.dll") - procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") - procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") -) - -const ( - winLockfileFailImmediately = 0x00000001 - winLockfileExclusiveLock = 0x00000002 - winLockfileSharedLock = 0x00000000 -) - -// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows -// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: -// -// > The function requests an exclusive lock. Otherwise, it requests a shared -// > lock. -// -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - -func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { - r1, _, errNo := syscall.Syscall6( - uintptr(procLockFileEx), - 6, - uintptr(handle), - uintptr(flags), - uintptr(reserved), - uintptr(numberOfBytesToLockLow), - uintptr(numberOfBytesToLockHigh), - uintptr(unsafe.Pointer(offset))) - - if r1 != 1 { - if errNo == 0 { - return false, syscall.EINVAL - } - - return false, errNo - } - - return true, 0 -} - -func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { - r1, _, errNo := syscall.Syscall6( - uintptr(procUnlockFileEx), - 5, - uintptr(handle), - uintptr(reserved), - uintptr(numberOfBytesToLockLow), - uintptr(numberOfBytesToLockHigh), - uintptr(unsafe.Pointer(offset)), - 0) - - if r1 != 1 { - if errNo == 0 { - return false, syscall.EINVAL - } - - return false, errNo - } - - return true, 0 -} diff --git a/vendor/github.com/gofrs/flock/flock_windows.go 
b/vendor/github.com/gofrs/flock/flock_windows.go deleted file mode 100644 index 9f4a5f1..0000000 --- a/vendor/github.com/gofrs/flock/flock_windows.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -package flock - -import ( - "syscall" -) - -// ErrorLockViolation is the error code returned from the Windows syscall when a -// lock would block and you ask to fail immediately. -const ErrorLockViolation syscall.Errno = 0x21 // 33 - -// Lock is a blocking call to try and take an exclusive file lock. It will wait -// until it is able to obtain the exclusive file lock. It's recommended that -// TryLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -func (f *Flock) Lock() error { - return f.lock(&f.l, winLockfileExclusiveLock) -} - -// RLock is a blocking call to try and take a shared file lock. It will wait -// until it is able to obtain the shared file lock. It's recommended that -// TryRLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. 
-func (f *Flock) RLock() error { - return f.lock(&f.r, winLockfileSharedLock) -} - -func (f *Flock) lock(locked *bool, flag uint32) error { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return err - } - } - - if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { - return errNo - } - - *locked = true - return nil -} - -// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so -// while it is running the Locked() and RLocked() functions will be blocked. -// -// This function short-circuits if we are unlocked already. If not, it calls -// UnlockFileEx() on the file and closes the file descriptor. It does not remove -// the file from disk. It's up to your application to do. -func (f *Flock) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - // if we aren't locked or if the lockfile instance is nil - // just return a nil error because we are unlocked - if (!f.l && !f.r) || f.fh == nil { - return nil - } - - // mark the file as unlocked - if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { - return errNo - } - - f.fh.Close() - - f.l = false - f.r = false - f.fh = nil - - return nil -} - -// TryLock is the preferred function for taking an exclusive file lock. This -// function does take a RW-mutex lock before it tries to lock the file, so there -// is the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the exclusive -// file lock, the function will return false instead of waiting for the lock. If -// we get the lock, we also set the *Flock instance as being exclusive-locked. -func (f *Flock) TryLock() (bool, error) { - return f.try(&f.l, winLockfileExclusiveLock) -} - -// TryRLock is the preferred function for taking a shared file lock. 
This -// function does take a RW-mutex lock before it tries to lock the file, so there -// is the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the shared file -// lock, the function will return false instead of waiting for the lock. If we -// get the lock, we also set the *Flock instance as being shared-locked. -func (f *Flock) TryRLock() (bool, error) { - return f.try(&f.r, winLockfileSharedLock) -} - -func (f *Flock) try(locked *bool, flag uint32) (bool, error) { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return true, nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return false, err - } - } - - _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) - - if errNo > 0 { - if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { - return false, nil - } - - return false, errNo - } - - *locked = true - - return true, nil -} diff --git a/vendor/github.com/pulumi/kube2pulumi/pkg/kube2pulumi/kube2pulumi.go b/vendor/github.com/pulumi/kube2pulumi/pkg/kube2pulumi/kube2pulumi.go index 95bc899..9c91e03 100644 --- a/vendor/github.com/pulumi/kube2pulumi/pkg/kube2pulumi/kube2pulumi.go +++ b/vendor/github.com/pulumi/kube2pulumi/pkg/kube2pulumi/kube2pulumi.go @@ -10,12 +10,12 @@ import ( // Kube2PulumiFile generates an output file containing the converted YAML manifest // file (filePath) into the specified language and returns the path of the generated // code file -func Kube2PulumiFile(filePath string, language string) (string, hcl.Diagnostics, error) { +func Kube2PulumiFile(filePath string, outputFile string, language string) (string, hcl.Diagnostics, error) { pcl, diags, err := yaml2pcl.ConvertFile(filePath) if err != nil { return "", diags, err } - outPath := getOutputFile(filepath.Dir(filePath), language) + outPath := 
getOutputFile(filepath.Dir(filePath), outputFile, language) outFile, err := pcl2pulumi.Pcl2Pulumi(pcl, outPath, language) if err != nil { return "", diags, err @@ -26,12 +26,12 @@ func Kube2PulumiFile(filePath string, language string) (string, hcl.Diagnostics, // Kube2PulumiDirectory generates an output file containing the converted directory // containing YAML manifests (directoryPath) into the specified language and returns // the path of the generated code file -func Kube2PulumiDirectory(directoryPath string, language string) (string, hcl.Diagnostics, error) { +func Kube2PulumiDirectory(directoryPath string, outputFile string, language string) (string, hcl.Diagnostics, error) { pcl, diags, err := yaml2pcl.ConvertDirectory(directoryPath) if err != nil { return "", diags, err } - outPath := getOutputFile(directoryPath, language) + outPath := getOutputFile(filepath.Dir(directoryPath), outputFile, language) outFile, err := pcl2pulumi.Pcl2Pulumi(pcl, outPath, language) if err != nil { return "", diags, err @@ -39,7 +39,10 @@ func Kube2PulumiDirectory(directoryPath string, language string) (string, hcl.Di return outFile, diags, nil } -func getOutputFile(dir, language string) string { +func getOutputFile(dir, outputFile, language string) string { + if outputFile != "" { + return outputFile + } var fName string switch language { case "typescript": diff --git a/vendor/github.com/pulumi/kube2pulumi/pkg/pcl2pulumi/pcl2pulumi.go b/vendor/github.com/pulumi/kube2pulumi/pkg/pcl2pulumi/pcl2pulumi.go index 871a67c..b4d18fc 100644 --- a/vendor/github.com/pulumi/kube2pulumi/pkg/pcl2pulumi/pcl2pulumi.go +++ b/vendor/github.com/pulumi/kube2pulumi/pkg/pcl2pulumi/pcl2pulumi.go @@ -14,9 +14,9 @@ import ( "github.com/hashicorp/hcl/v2" csgen "github.com/pulumi/pulumi/pkg/v3/codegen/dotnet" gogen "github.com/pulumi/pulumi/pkg/v3/codegen/go" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" tsgen 
"github.com/pulumi/pulumi/pkg/v3/codegen/nodejs" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" pygen "github.com/pulumi/pulumi/pkg/v3/codegen/python" ) @@ -67,7 +67,7 @@ func convertPulumi(ppFile *os.File, newFileName string, outputLanguage string) ( } }() - var generateProgram func(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, error) + var generateProgram func(program *pcl.Program) (map[string][]byte, hcl.Diagnostics, error) var fileExt string switch outputLanguage { case "typescript", "javascript": @@ -102,7 +102,7 @@ func convertPulumi(ppFile *os.File, newFileName string, outputLanguage string) ( return "", err } } - program, diags, err := hcl2.BindProgram(parser.Files) + program, diags, err := pcl.BindProgram(parser.Files) if err != nil { log.Print("failed to bind program: ") return "", err diff --git a/vendor/github.com/pulumi/kube2pulumi/pkg/yaml2pcl/yaml2pcl.go b/vendor/github.com/pulumi/kube2pulumi/pkg/yaml2pcl/yaml2pcl.go index 834676d..eeb055c 100644 --- a/vendor/github.com/pulumi/kube2pulumi/pkg/yaml2pcl/yaml2pcl.go +++ b/vendor/github.com/pulumi/kube2pulumi/pkg/yaml2pcl/yaml2pcl.go @@ -80,6 +80,9 @@ func convert(testFiles ast.File) (string, hcl.Diagnostics, error) { diagnostics := hcl.Diagnostics{} for _, doc := range testFiles.Docs { + if doc == nil || doc.Body == nil { + continue + } baseNodes := ast.Filter(ast.MappingValueType, doc.Body) header, diag := getHeader(baseNodes) // check diagnostics here and break at malformed resource then continue for other resources defined @@ -99,13 +102,26 @@ func convert(testFiles ast.File) (string, hcl.Diagnostics, error) { return buff.String(), diagnostics, err } +// this will remove any incorrect quotes around the apiVersion of a yaml file +// apiVersion: "apps/v1" is just as valid as apiVersion: apps/v1 +// so we should accept both +func trimQuotes(s string) string { + if len(s) > 0 && s[0] == '"' { + s = s[1:] + } + if len(s) > 0 && s[len(s)-1] == '"' { + s = s[:len(s)-1] + } + return s +} + // 
resource “kubernetes::” func getHeader(nodes []ast.Node) (string, hcl.Diagnostic) { var apiVersion string for _, node := range nodes { if mapValNode, ok := node.(*ast.MappingValueNode); ok { if mapValNode.Key.String() == "apiVersion" { - apiVersion = mapValNode.Value.String() + apiVersion = trimQuotes(mapValNode.Value.String()) break } } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/docs.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/docs.go index cc68522..d597146 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/docs.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/docs.go @@ -29,12 +29,16 @@ type DocLanguageHelper interface { GetDocLinkForPulumiType(pkg *schema.Package, typeName string) string GetDocLinkForResourceInputOrOutputType(pkg *schema.Package, moduleName, typeName string, input bool) string GetDocLinkForFunctionInputOrOutputType(pkg *schema.Package, moduleName, typeName string, input bool) string - GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, args, optional bool) string + GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input bool) string GetFunctionName(modName string, f *schema.Function) string // GetResourceFunctionResultName returns the name of the result type when a static resource function is used to lookup // an existing resource. GetResourceFunctionResultName(modName string, f *schema.Function) string + + GetMethodName(m *schema.Method) string + GetMethodResultName(pkg *schema.Package, modName string, r *schema.Resource, m *schema.Method) string + // GetModuleDocLink returns the display name and the link for a module (including root modules) in a given package. 
GetModuleDocLink(pkg *schema.Package, modName string) (string, string) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/doc.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/doc.go index c394544..77749ed 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/doc.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/doc.go @@ -72,19 +72,24 @@ func (d DocLanguageHelper) GetDocLinkForFunctionInputOrOutputType(pkg *schema.Pa } // GetLanguageTypeString returns the DotNet-specific type given a Pulumi schema type. -func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, args, optional bool) string { +func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input bool) string { + info, ok := pkg.Language["csharp"].(CSharpPackageInfo) + if !ok { + info = CSharpPackageInfo{} + } typeDetails := map[*schema.ObjectType]*typeDetails{} mod := &modContext{ - pkg: pkg, - mod: moduleName, - typeDetails: typeDetails, - namespaces: d.Namespaces, + pkg: pkg, + mod: moduleName, + typeDetails: typeDetails, + namespaces: d.Namespaces, + rootNamespace: info.GetRootNamespace(), } - qualifier := "Inputs" + qualifier := "Inputs" // nolint: goconst if !input { qualifier = "Outputs" } - return mod.typeString(t, qualifier, input, false /*state*/, false /*wrapInput*/, args /*args*/, true /*requireInitializers*/, optional) + return mod.typeString(t, qualifier, input, false /*state*/, true /*requireInitializers*/) } func (d DocLanguageHelper) GetFunctionName(modName string, f *schema.Function) string { @@ -98,6 +103,29 @@ func (d DocLanguageHelper) GetResourceFunctionResultName(modName string, f *sche return funcName + "Result" } +func (d DocLanguageHelper) GetMethodName(m *schema.Method) string { + return Title(m.Name) +} + +func (d DocLanguageHelper) GetMethodResultName(pkg *schema.Package, modName string, r *schema.Resource, + m *schema.Method) 
string { + + if info, ok := pkg.Language["csharp"].(CSharpPackageInfo); ok { + if info.LiftSingleValueMethodReturns && m.Function.Outputs != nil && len(m.Function.Outputs.Properties) == 1 { + typeDetails := map[*schema.ObjectType]*typeDetails{} + mod := &modContext{ + pkg: pkg, + mod: modName, + typeDetails: typeDetails, + namespaces: d.Namespaces, + rootNamespace: info.GetRootNamespace(), + } + return mod.typeString(m.Function.Outputs.Properties[0].Type, "", false, false, false) + } + } + return fmt.Sprintf("%s.%sResult", resourceName(r), d.GetMethodName(m)) +} + // GetPropertyName uses the property's csharp-specific language info, if available, to generate // the property name. Otherwise, returns the PascalCase as the default. func (d DocLanguageHelper) GetPropertyName(p *schema.Property) (string, error) { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen.go index a49efec..f03eb7d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -32,9 +32,11 @@ import ( "strings" "unicode" - "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v3/codegen" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" + "github.com/pulumi/pulumi/sdk/v3/go/common/diag" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -50,11 +52,11 @@ func (ss stringSet) has(s string) bool { } type typeDetails struct { - outputType bool - inputType bool - stateType bool - argsType bool - plainType bool + outputType bool + inputType bool + stateType bool + plainType bool + usedInFunctionOutputVersionInputs bool } // Title converts the input string to a title case @@ -107,14 +109,18 @@ func isImmutableArrayType(t schema.Type, wrapInput bool) bool { } func isValueType(t schema.Type) bool { - if _, ok := t.(*schema.EnumType); ok { - return true - } - switch t { - case schema.BoolType, schema.IntType, schema.NumberType: + switch t := t.(type) { + case *schema.OptionalType: + return isValueType(t.ElementType) + case *schema.EnumType: return true default: - return false + switch t { + case schema.BoolType, schema.IntType, schema.NumberType: + return true + default: + return false + } } } @@ -140,6 +146,22 @@ type modContext struct { namespaces map[string]string compatibility string dictionaryConstructors bool + + // If types in the Input namespace are used. + fullyQualifiedInputs bool + + // Determine whether to lift single-value method return values + liftSingleValueMethodReturns bool + + // The root namespace to use, if any. 
+ rootNamespace string +} + +func (mod *modContext) RootNamespace() string { + if mod.rootNamespace != "" { + return mod.rootNamespace + } + return "Pulumi" } func (mod *modContext) propertyName(p *schema.Property) string { @@ -190,7 +212,7 @@ func (mod *modContext) tokenToNamespace(tok string, qualifier string) string { components := strings.Split(tok, ":") contract.Assertf(len(components) == 3, "malformed token %v", tok) - pkg, nsName := "Pulumi."+namespaceName(mod.namespaces, components[0]), mod.pkg.TokenToModule(tok) + pkg, nsName := mod.RootNamespace()+"."+namespaceName(mod.namespaces, components[0]), mod.pkg.TokenToModule(tok) if mod.isK8sCompatMode() { if qualifier != "" { @@ -221,6 +243,8 @@ func (mod *modContext) typeName(t *schema.ObjectType, state, input, args bool) s } switch { + case input && args && mod.details(t).usedInFunctionOutputVersionInputs: + return name + "InputArgs" case input: return name + "Args" case mod.details(t).plainType: @@ -229,39 +253,130 @@ func (mod *modContext) typeName(t *schema.ObjectType, state, input, args bool) s return name } -func (mod *modContext) typeString(t schema.Type, qualifier string, input, state, wrapInput, args, requireInitializers, optional bool) string { - var typ string - switch t := t.(type) { - case *schema.EnumType: - typ = mod.tokenToNamespace(t.Token, "") - typ += "." 
- typ += tokenToName(t.Token) +func isInputType(t schema.Type) bool { + if optional, ok := t.(*schema.OptionalType); ok { + t = optional.ElementType + } + _, isInputType := t.(*schema.InputType) + return isInputType +} + +func ignoreOptional(t *schema.OptionalType, requireInitializers bool) bool { + switch t := t.ElementType.(type) { + case *schema.InputType: + switch t.ElementType.(type) { + case *schema.ArrayType, *schema.MapType: + return true + } case *schema.ArrayType: - var listFmt string - switch { - case wrapInput: - listFmt, optional = "InputList<%v>", false - case requireInitializers: - listFmt = "List<%v>" - default: - listFmt, optional = "ImmutableArray<%v>", false + return !requireInitializers + } + return false +} + +func simplifyInputUnion(union *schema.UnionType) *schema.UnionType { + elements := make([]schema.Type, len(union.ElementTypes)) + for i, et := range union.ElementTypes { + if input, ok := et.(*schema.InputType); ok { + switch input.ElementType.(type) { + case *schema.ArrayType, *schema.MapType: + // Instead of just replacing Input<{Array,Map}> with {Array,Map}, replace it with + // {Array,Map}. This matches the behavior of typeString when presented with an + // Input<{Array,Map}>. 
+ elements[i] = codegen.PlainType(input.ElementType) + default: + elements[i] = input.ElementType + } + } else { + elements[i] = et } + } + return &schema.UnionType{ + ElementTypes: elements, + DefaultType: union.DefaultType, + Discriminator: union.Discriminator, + Mapping: union.Mapping, + } +} - wrapInput = false - typ = fmt.Sprintf(listFmt, mod.typeString(t.ElementType, qualifier, input, state, wrapInput, args, false, false)) - case *schema.MapType: - var mapFmt string - switch { - case wrapInput: - mapFmt, optional = "InputMap<%v>", false - case requireInitializers: - mapFmt = "Dictionary" +func (mod *modContext) unionTypeString(t *schema.UnionType, qualifier string, input, wrapInput, state, requireInitializers bool) string { + elementTypeSet := stringSet{} + var elementTypes []string + for _, e := range t.ElementTypes { + // If this is an output and a "relaxed" enum, emit the type as the underlying primitive type rather than the union. + // Eg. Output rather than Output> + if typ, ok := e.(*schema.EnumType); ok && !input { + return mod.typeString(typ.ElementType, qualifier, input, state, requireInitializers) + } + + et := mod.typeString(e, qualifier, input, state, false) + if !elementTypeSet.has(et) { + elementTypeSet.add(et) + elementTypes = append(elementTypes, et) + } + } + + switch len(elementTypes) { + case 1: + if wrapInput { + return fmt.Sprintf("Input<%s>", elementTypes[0]) + } + return elementTypes[0] + case 2: + unionT := "Union" + if wrapInput { + unionT = "InputUnion" + } + return fmt.Sprintf("%s<%s>", unionT, strings.Join(elementTypes, ", ")) + default: + return "object" + } +} + +func (mod *modContext) typeString(t schema.Type, qualifier string, input, state, requireInitializers bool) string { + switch t := t.(type) { + case *schema.OptionalType: + elem := mod.typeString(t.ElementType, qualifier, input, state, requireInitializers) + if ignoreOptional(t, requireInitializers) { + return elem + } + return elem + "?" 
+ case *schema.InputType: + inputType := "Input" + elem := t.ElementType + switch e := t.ElementType.(type) { + case *schema.ArrayType: + inputType, elem = "InputList", codegen.PlainType(e.ElementType) + case *schema.MapType: + inputType, elem = "InputMap", codegen.PlainType(e.ElementType) default: - mapFmt = "ImmutableDictionary" + if e == schema.JSONType { + return "InputJson" + } } - wrapInput = false - typ = fmt.Sprintf(mapFmt, mod.typeString(t.ElementType, qualifier, input, state, wrapInput, args, false, false)) + if union, ok := elem.(*schema.UnionType); ok { + union = simplifyInputUnion(union) + if inputType == "Input" { + return mod.unionTypeString(union, qualifier, input, true, state, requireInitializers) + } + elem = union + } + return fmt.Sprintf("%s<%s>", inputType, mod.typeString(elem, qualifier, input, state, requireInitializers)) + case *schema.EnumType: + return fmt.Sprintf("%s.%s", mod.tokenToNamespace(t.Token, ""), tokenToName(t.Token)) + case *schema.ArrayType: + listType := "ImmutableArray" + if requireInitializers { + listType = "List" + } + return fmt.Sprintf("%v<%v>", listType, mod.typeString(t.ElementType, qualifier, input, state, false)) + case *schema.MapType: + mapType := "ImmutableDictionary" + if requireInitializers { + mapType = "Dictionary" + } + return fmt.Sprintf("%v", mapType, mod.typeString(t.ElementType, qualifier, input, state, false)) case *schema.ObjectType: namingCtx := mod if t.Package != mod.pkg { @@ -276,117 +391,84 @@ func (mod *modContext) typeString(t schema.Type, qualifier string, input, state, namingCtx = &modContext{ pkg: extPkg, namespaces: info.Namespaces, + rootNamespace: info.GetRootNamespace(), compatibility: info.Compatibility, } } - typ = namingCtx.tokenToNamespace(t.Token, qualifier) + typ := namingCtx.tokenToNamespace(t.Token, qualifier) if (typ == namingCtx.namespaceName && qualifier == "") || typ == namingCtx.namespaceName+"."+qualifier { typ = qualifier } + if typ == "Inputs" && mod.fullyQualifiedInputs { 
+ typ = fmt.Sprintf("%s.Inputs", mod.namespaceName) + } if typ != "" { typ += "." } - typ += mod.typeName(t, state, input, args) + return typ + mod.typeName(t, state, input, t.IsInputShape()) case *schema.ResourceType: if strings.HasPrefix(t.Token, "pulumi:providers:") { pkgName := strings.TrimPrefix(t.Token, "pulumi:providers:") - typ = fmt.Sprintf("Pulumi.%s.Provider", namespaceName(mod.namespaces, pkgName)) - } else { - namingCtx := mod - if t.Resource != nil && t.Resource.Package != mod.pkg { - // If resource type belongs to another package, we apply naming conventions from that package, - // including namespace naming and compatibility mode. - extPkg := t.Resource.Package - var info CSharpPackageInfo - contract.AssertNoError(extPkg.ImportLanguages(map[string]schema.Language{"csharp": Importer})) - if v, ok := t.Resource.Package.Language["csharp"].(CSharpPackageInfo); ok { - info = v - } - namingCtx = &modContext{ - pkg: extPkg, - namespaces: info.Namespaces, - compatibility: info.Compatibility, - } + return fmt.Sprintf("%s.%s.Provider", mod.RootNamespace(), namespaceName(mod.namespaces, pkgName)) + } + + namingCtx := mod + if t.Resource != nil && t.Resource.Package != mod.pkg { + // If resource type belongs to another package, we apply naming conventions from that package, + // including namespace naming and compatibility mode. + extPkg := t.Resource.Package + var info CSharpPackageInfo + contract.AssertNoError(extPkg.ImportLanguages(map[string]schema.Language{"csharp": Importer})) + if v, ok := t.Resource.Package.Language["csharp"].(CSharpPackageInfo); ok { + info = v } - typ = namingCtx.tokenToNamespace(t.Token, "") - if typ != "" { - typ += "." + namingCtx = &modContext{ + pkg: extPkg, + namespaces: info.Namespaces, + rootNamespace: info.GetRootNamespace(), + compatibility: info.Compatibility, } - typ += tokenToName(t.Token) } + typ := namingCtx.tokenToNamespace(t.Token, "") + if typ != "" { + typ += "." 
+ } + return typ + tokenToName(t.Token) case *schema.TokenType: // Use the underlying type for now. if t.UnderlyingType != nil { - return mod.typeString(t.UnderlyingType, qualifier, input, state, wrapInput, args, requireInitializers, optional) + return mod.typeString(t.UnderlyingType, qualifier, input, state, requireInitializers) } - typ = tokenToName(t.Token) + typ := tokenToName(t.Token) if ns := mod.tokenToNamespace(t.Token, qualifier); ns != mod.namespaceName { typ = ns + "." + typ } + return typ case *schema.UnionType: - elementTypeSet := stringSet{} - var elementTypes []string - for _, e := range t.ElementTypes { - // If this is an output and a "relaxed" enum, emit the type as the underlying primitive type rather than the union. - // Eg. Output rather than Output> - if typ, ok := e.(*schema.EnumType); ok && !input { - return mod.typeString(typ.ElementType, qualifier, input, state, wrapInput, args, requireInitializers, optional) - } - - et := mod.typeString(e, qualifier, input, state, false, args, false, false) - if !elementTypeSet.has(et) { - elementTypeSet.add(et) - elementTypes = append(elementTypes, et) - } - } - - switch len(elementTypes) { - case 1: - return mod.typeString(t.ElementTypes[0], qualifier, input, state, wrapInput, args, requireInitializers, optional) - case 2: - unionT := "Union" - if wrapInput { - unionT = "InputUnion" - } - typ = fmt.Sprintf("%s<%s>", unionT, strings.Join(elementTypes, ", ")) - wrapInput = false - default: - typ = "object" - } + return mod.unionTypeString(t, qualifier, input, false, state, requireInitializers) default: switch t { case schema.BoolType: - typ = "bool" + return "bool" case schema.IntType: - typ = "int" + return "int" case schema.NumberType: - typ = "double" + return "double" case schema.StringType: - typ = "string" + return "string" case schema.ArchiveType: - typ = "Archive" + return "Archive" case schema.AssetType: - typ = "AssetOrArchive" + return "AssetOrArchive" case schema.JSONType: - if wrapInput { - 
typ = "InputJson" - wrapInput = false - } else { - typ = "System.Text.Json.JsonElement" - } + return "System.Text.Json.JsonElement" case schema.AnyType: - typ = "object" + return "object" } } - if wrapInput { - typ = fmt.Sprintf("Input<%s>", typ) - } - if optional { - typ += "?" - } - return typ + panic(fmt.Errorf("unexpected type %T", t)) } var docCommentEscaper = strings.NewReplacer( @@ -395,8 +477,16 @@ var docCommentEscaper = strings.NewReplacer( `>`, ">", ) -func printComment(w io.Writer, comment string, indent string) { - lines := strings.Split(docCommentEscaper.Replace(comment), "\n") +func printComment(w io.Writer, comment, indent string) { + printCommentWithOptions(w, comment, indent, true /*escape*/) +} + +func printCommentWithOptions(w io.Writer, comment, indent string, escape bool) { + if escape { + comment = docCommentEscaper.Replace(comment) + } + + lines := strings.Split(comment, "\n") for len(lines) > 0 && lines[len(lines)-1] == "" { lines = lines[:len(lines)-1] } @@ -415,41 +505,46 @@ type plainType struct { res *schema.Resource name string comment string + unescapeComment bool baseClass string propertyTypeQualifier string properties []*schema.Property args bool state bool + internal bool } -func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent string) { - argsType := pt.args && !prop.IsPlain - +func (pt *plainType) genInputPropertyAttribute(w io.Writer, indent string, prop *schema.Property) { wireName := prop.Name - propertyName := pt.mod.propertyName(prop) - propertyType := pt.mod.typeString(prop.Type, pt.propertyTypeQualifier, true, pt.state, argsType, argsType, false, !prop.IsRequired) - - // First generate the input attribute. 
attributeArgs := "" - if prop.IsRequired { + if prop.IsRequired() { attributeArgs = ", required: true" } if pt.res != nil && pt.res.IsProvider { json := true - if prop.Type == schema.StringType { + typ := codegen.UnwrapType(prop.Type) + if typ == schema.StringType { json = false - } else if t, ok := prop.Type.(*schema.TokenType); ok && t.UnderlyingType == schema.StringType { + } else if t, ok := typ.(*schema.TokenType); ok && t.UnderlyingType == schema.StringType { json = false } if json { attributeArgs += ", json: true" } } + fmt.Fprintf(w, "%s[Input(\"%s\"%s)]\n", indent, wireName, attributeArgs) +} + +func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent string, generateInputAttribute bool) { + propertyName := pt.mod.propertyName(prop) + propertyType := pt.mod.typeString(prop.Type, pt.propertyTypeQualifier, true, pt.state, false) indent = strings.Repeat(indent, 2) + // Next generate the input property itself. The way this is generated depends on the type of the property: + // complex types like lists and maps need a backing field. needsBackingField := false - switch prop.Type.(type) { + switch codegen.UnwrapType(prop.Type).(type) { case *schema.ArrayType, *schema.MapType: needsBackingField = true } @@ -461,10 +556,13 @@ func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent // complex types like lists and maps need a backing field. Secret properties also require a backing field. 
if needsBackingField { backingFieldName := "_" + prop.Name - requireInitializers := !pt.args || prop.IsPlain - backingFieldType := pt.mod.typeString(prop.Type, pt.propertyTypeQualifier, true, pt.state, argsType, argsType, requireInitializers, false) + requireInitializers := !pt.args || !isInputType(prop.Type) + backingFieldType := pt.mod.typeString(codegen.RequiredType(prop), pt.propertyTypeQualifier, true, pt.state, requireInitializers) + + if generateInputAttribute { + pt.genInputPropertyAttribute(w, indent, prop) + } - fmt.Fprintf(w, "%s[Input(\"%s\"%s)]\n", indent, wireName, attributeArgs) fmt.Fprintf(w, "%sprivate %s? %s;\n", indent, backingFieldType, backingFieldName) if prop.Comment != "" { @@ -473,7 +571,7 @@ func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent } printObsoleteAttribute(w, prop.DeprecationMessage, indent) - switch prop.Type.(type) { + switch codegen.UnwrapType(prop.Type).(type) { case *schema.ArrayType, *schema.MapType: // Note that we use the backing field type--which is just the property type without any nullable annotation--to // ensure that the user does not see warnings when initializing these properties using object or collection @@ -492,12 +590,12 @@ func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent // Since we can't directly assign the Output from CreateSecret to the property, use an Output.All or // Output.Tuple to enable the secret flag on the data. (If any input to the All/Tuple is secret, then the // Output will also be secret.) 
- switch t := prop.Type.(type) { + switch t := codegen.UnwrapType(prop.Type).(type) { case *schema.ArrayType: - fmt.Fprintf(w, "%s var emptySecret = Output.CreateSecret(ImmutableArray.Create<%s>());\n", indent, t.ElementType.String()) + fmt.Fprintf(w, "%s var emptySecret = Output.CreateSecret(ImmutableArray.Create<%s>());\n", indent, codegen.PlainType(t.ElementType).String()) fmt.Fprintf(w, "%s %s = Output.All(value, emptySecret).Apply(v => v[0]);\n", indent, backingFieldName) case *schema.MapType: - fmt.Fprintf(w, "%s var emptySecret = Output.CreateSecret(ImmutableDictionary.Create());\n", indent, t.ElementType.String()) + fmt.Fprintf(w, "%s var emptySecret = Output.CreateSecret(ImmutableDictionary.Create());\n", indent, codegen.PlainType(t.ElementType).String()) fmt.Fprintf(w, "%s %s = Output.All(value, emptySecret).Apply(v => v[0]);\n", indent, backingFieldName) default: fmt.Fprintf(w, "%s var emptySecret = Output.CreateSecret(0);\n", indent) @@ -510,12 +608,16 @@ func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent fmt.Fprintf(w, "%s}\n", indent) } else { initializer := "" - if prop.IsRequired && (!isValueType(prop.Type) || (pt.args && !prop.IsPlain)) { + if prop.IsRequired() && !isValueType(prop.Type) { initializer = " = null!;" } printComment(w, prop.Comment, indent) - fmt.Fprintf(w, "%s[Input(\"%s\"%s)]\n", indent, wireName, attributeArgs) + + if generateInputAttribute { + pt.genInputPropertyAttribute(w, indent, prop) + } + fmt.Fprintf(w, "%spublic %s %s { get; set; }%s\n", indent, propertyType, propertyName, initializer) } } @@ -524,6 +626,10 @@ func (pt *plainType) genInputProperty(w io.Writer, prop *schema.Property, indent var generatedTypes = codegen.Set{} func (pt *plainType) genInputType(w io.Writer, level int) error { + return pt.genInputTypeWithFlags(w, level, true /* generateInputAttributes */) +} + +func (pt *plainType) genInputTypeWithFlags(w io.Writer, level int, generateInputAttributes bool) error { // The way the 
legacy codegen for kubernetes is structured, inputs for a resource args type and resource args // subtype could become a single class because of the name + namespace clash. We use a set of generated types // to prevent generating classes with equal full names in multiple files. The check should be removed if we @@ -546,13 +652,19 @@ func (pt *plainType) genInputType(w io.Writer, level int) error { } // Open the class. - printComment(w, pt.comment, indent) - fmt.Fprintf(w, "%spublic %sclass %s : Pulumi.%s\n", indent, sealed, pt.name, pt.baseClass) + printCommentWithOptions(w, pt.comment, indent, !pt.unescapeComment) + + var suffix string + if pt.baseClass != "" { + suffix = fmt.Sprintf(" : Pulumi.%s", pt.baseClass) + } + + fmt.Fprintf(w, "%spublic %sclass %s%s\n", indent, sealed, pt.name, suffix) fmt.Fprintf(w, "%s{\n", indent) // Declare each input property. for _, p := range pt.properties { - pt.genInputProperty(w, p, indent) + pt.genInputProperty(w, p, indent, generateInputAttributes) fmt.Fprintf(w, "\n") } @@ -583,15 +695,25 @@ func (pt *plainType) genOutputType(w io.Writer, level int) { fmt.Fprintf(w, "\n") // Open the class and attribute it appropriately. + printCommentWithOptions(w, pt.comment, indent, !pt.unescapeComment) fmt.Fprintf(w, "%s[OutputType]\n", indent) - fmt.Fprintf(w, "%spublic sealed class %s\n", indent, pt.name) + + visibility := "public" + if pt.internal { + visibility = "internal" + } + + fmt.Fprintf(w, "%s%s sealed class %s\n", indent, visibility, pt.name) fmt.Fprintf(w, "%s{\n", indent) // Generate each output field. 
for _, prop := range pt.properties { fieldName := pt.mod.propertyName(prop) - required := prop.IsRequired || pt.mod.isK8sCompatMode() - fieldType := pt.mod.typeString(prop.Type, pt.propertyTypeQualifier, false, false, false, false, false, !required) + typ := prop.Type + if !prop.IsRequired() && pt.mod.isK8sCompatMode() { + typ = codegen.RequiredType(prop) + } + fieldType := pt.mod.typeString(typ, pt.propertyTypeQualifier, false, false, false) printComment(w, prop.Comment, indent+" ") fmt.Fprintf(w, "%s public readonly %s %s;\n", indent, fieldType, fieldName) } @@ -606,8 +728,11 @@ func (pt *plainType) genOutputType(w io.Writer, level int) { // Generate the constructor parameters. for i, prop := range pt.properties { paramName := csharpIdentifier(prop.Name) - required := prop.IsRequired || pt.mod.isK8sCompatMode() - paramType := pt.mod.typeString(prop.Type, pt.propertyTypeQualifier, false, false, false, false, false, !required) + typ := prop.Type + if !prop.IsRequired() && pt.mod.isK8sCompatMode() { + typ = codegen.RequiredType(prop) + } + paramType := pt.mod.typeString(typ, pt.propertyTypeQualifier, false, false, false) terminator := "" if i != len(pt.properties)-1 { @@ -660,11 +785,13 @@ func primitiveValue(value interface{}) (string, error) { case reflect.String: return fmt.Sprintf("%q", v.String()), nil default: - return "", errors.Errorf("unsupported default value of type %T", value) + return "", fmt.Errorf("unsupported default value of type %T", value) } } func (mod *modContext) getDefaultValue(dv *schema.DefaultValue, t schema.Type) (string, error) { + t = codegen.UnwrapType(t) + var val string if dv.Value != nil { switch enum := t.(type) { @@ -687,7 +814,7 @@ func (mod *modContext) getDefaultValue(dv *schema.DefaultValue, t schema.Type) ( break } if val == "" { - return "", errors.Errorf("default value '%v' not found in enum '%s'", dv.Value, enumName) + return "", fmt.Errorf("default value '%v' not found in enum '%s'", dv.Value, enumName) } default: v, err 
:= primitiveValue(dv.Value) @@ -790,13 +917,18 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { // Write the property attribute wireName := prop.Name propertyName := mod.propertyName(prop) - required := prop.IsRequired || mod.isK8sCompatMode() - propertyType := mod.typeString(prop.Type, "Outputs", false, false, false, false, false, !required) + + typ := prop.Type + if !prop.IsRequired() && mod.isK8sCompatMode() { + typ = codegen.RequiredType(prop) + } + + propertyType := mod.typeString(typ, "Outputs", false, false, false) // Workaround the fact that provider inputs come back as strings. if r.IsProvider && !schema.IsPrimitiveType(prop.Type) { propertyType = "string" - if !prop.IsRequired { + if !prop.IsRequired() { propertyType += "?" } } @@ -825,7 +957,7 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { allOptionalInputs := true hasConstInputs := false for _, prop := range r.InputProperties { - allOptionalInputs = allOptionalInputs && !prop.IsRequired + allOptionalInputs = allOptionalInputs && !prop.IsRequired() hasConstInputs = hasConstInputs || prop.ConstValue != nil } if allOptionalInputs || mod.isK8sCompatMode() { @@ -915,6 +1047,9 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { fmt.Fprintf(w, " var defaultOptions = new %s\n", optionsType) fmt.Fprintf(w, " {\n") fmt.Fprintf(w, " Version = Utilities.Version,\n") + if url := mod.pkg.PluginDownloadURL; url != "" { + fmt.Fprintf(w, " PluginDownloadURL = %q,\n", url) + } if len(r.Aliases) > 0 { fmt.Fprintf(w, " Aliases =\n") @@ -937,6 +1072,21 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { fmt.Fprintf(w, " },\n") } + replaceOnChangesProps, errList := r.ReplaceOnChanges() + for _, err := range errList { + cmdutil.Diag().Warningf(&diag.Diag{Message: err.Error()}) + } + if len(replaceOnChangesProps) > 0 { + fmt.Fprint(w, " ReplaceOnChanges =\n") + fmt.Fprintf(w, " {\n") + for _, n := range 
schema.PropertyListJoinToString(replaceOnChangesProps, + func(s string) string { return s }) { + fmt.Fprintf(w, " ") + fmt.Fprintf(w, "%q,\n", n) + } + fmt.Fprintf(w, " },\n") + } + fmt.Fprintf(w, " };\n") fmt.Fprintf(w, " var merged = %s.Merge(defaultOptions, options);\n", optionsType) fmt.Fprintf(w, " // Override the ID if one was specified for consistency with other language SDKs.\n") @@ -967,6 +1117,68 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { fmt.Fprintf(w, " }\n") } + // Generate methods. + genMethod := func(method *schema.Method) { + methodName := Title(method.Name) + fun := method.Function + + shouldLiftReturn := mod.liftSingleValueMethodReturns && fun.Outputs != nil && len(fun.Outputs.Properties) == 1 + + fmt.Fprintf(w, "\n") + + returnType, typeParameter, lift := "void", "", "" + if fun.Outputs != nil { + typeParameter = fmt.Sprintf("<%s%sResult>", className, methodName) + if shouldLiftReturn { + returnType = fmt.Sprintf("Pulumi.Output<%s>", + mod.typeString(fun.Outputs.Properties[0].Type, "", false, false, false)) + + fieldName := mod.propertyName(fun.Outputs.Properties[0]) + lift = fmt.Sprintf(".Apply(v => v.%s)", fieldName) + } else { + returnType = fmt.Sprintf("Pulumi.Output%s", typeParameter) + } + } + + var argsParamDef string + argsParamRef := "CallArgs.Empty" + if fun.Inputs != nil { + var hasArgs bool + allOptionalInputs := true + for _, arg := range fun.Inputs.InputShape.Properties { + if arg.Name == "__self__" { + continue + } + hasArgs = true + allOptionalInputs = allOptionalInputs && !arg.IsRequired() + } + if hasArgs { + var argsDefault, sigil string + if allOptionalInputs { + // If the number of required input properties was zero, we can make the args object optional. + argsDefault, sigil = " = null", "?" + } + + argsParamDef = fmt.Sprintf("%s%sArgs%s args%s", className, methodName, sigil, argsDefault) + argsParamRef = fmt.Sprintf("args ?? 
new %s%sArgs()", className, methodName) + } + } + + // Emit the doc comment, if any. + printComment(w, fun.Comment, " ") + + if fun.DeprecationMessage != "" { + fmt.Fprintf(w, " [Obsolete(@\"%s\")]\n", strings.ReplaceAll(fun.DeprecationMessage, `"`, `""`)) + } + + fmt.Fprintf(w, " public %s %s(%s)\n", returnType, methodName, argsParamDef) + fmt.Fprintf(w, " => Pulumi.Deployment.Instance.Call%s(\"%s\", %s, this)%s;\n", + typeParameter, fun.Token, argsParamRef, lift) + } + for _, method := range r.Methods { + genMethod(method) + } + // Close the class. fmt.Fprintf(w, " }\n") @@ -1011,12 +1223,118 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { } } + // Generate method types. + genMethodTypes := func(method *schema.Method) error { + methodName := Title(method.Name) + fun := method.Function + + // Generate args type. + var args []*schema.Property + if fun.Inputs != nil { + // Filter out the __self__ argument from the inputs. + args = make([]*schema.Property, 0, len(fun.Inputs.InputShape.Properties)-1) + for _, arg := range fun.Inputs.InputShape.Properties { + if arg.Name == "__self__" { + continue + } + args = append(args, arg) + } + } + if len(args) > 0 { + comment, escape := fun.Inputs.Comment, true + if comment == "" { + comment, escape = fmt.Sprintf( + "The set of arguments for the method.", className, methodName), false + } + argsType := &plainType{ + mod: mod, + comment: comment, + unescapeComment: !escape, + name: fmt.Sprintf("%s%sArgs", className, methodName), + baseClass: "CallArgs", + propertyTypeQualifier: "Inputs", + properties: args, + args: true, + } + if err := argsType.genInputType(w, 1); err != nil { + return err + } + } + + // Generate result type. 
+ if fun.Outputs != nil { + shouldLiftReturn := mod.liftSingleValueMethodReturns && len(fun.Outputs.Properties) == 1 + + comment, escape := fun.Inputs.Comment, true + if comment == "" { + comment, escape = fmt.Sprintf( + "The results of the method.", className, methodName), false + } + resultType := &plainType{ + mod: mod, + comment: comment, + unescapeComment: !escape, + name: fmt.Sprintf("%s%sResult", className, methodName), + propertyTypeQualifier: "Outputs", + properties: fun.Outputs.Properties, + internal: shouldLiftReturn, + } + resultType.genOutputType(w, 1) + } + + return nil + } + for _, method := range r.Methods { + if err := genMethodTypes(method); err != nil { + return err + } + } + // Close the namespace. fmt.Fprintf(w, "}\n") return nil } +func (mod *modContext) genFunctionFileCode(f *schema.Function) (string, error) { + imports := map[string]codegen.StringSet{} + mod.getImports(f, imports) + buffer := &bytes.Buffer{} + importStrings := mod.pulumiImports() + + // True if the function has a non-standard namespace. + nonStandardNamespace := mod.namespaceName != mod.tokenToNamespace(f.Token, "") + // If so, we need to import our project defined types. + if nonStandardNamespace { + importStrings = append(importStrings, mod.namespaceName) + } + for _, i := range imports { + importStrings = append(importStrings, i.SortedValues()...) + } + + // We need to qualify input types when we are not in the same module as them. 
+ if nonStandardNamespace { + defer func(current bool) { mod.fullyQualifiedInputs = current }(mod.fullyQualifiedInputs) + mod.fullyQualifiedInputs = true + } + mod.genHeader(buffer, importStrings) + if err := mod.genFunction(buffer, f); err != nil { + return "", err + } + return buffer.String(), nil +} + +func allOptionalInputs(fun *schema.Function) bool { + if fun.Inputs != nil { + for _, prop := range fun.Inputs.Properties { + if prop.IsRequired() { + return false + } + } + } + return true +} + func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) error { className := tokenToFunctionName(fun.Token) @@ -1031,13 +1349,8 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) error { var argsParamDef string argsParamRef := "InvokeArgs.Empty" if fun.Inputs != nil { - allOptionalInputs := true - for _, prop := range fun.Inputs.Properties { - allOptionalInputs = allOptionalInputs && !prop.IsRequired - } - var argsDefault, sigil string - if allOptionalInputs { + if allOptionalInputs(fun) { // If the number of required input properties was zero, we can make the args object optional. argsDefault, sigil = " = null", "?" } @@ -1059,9 +1372,15 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) error { // Emit the datasource method. fmt.Fprintf(w, " public static Task%s InvokeAsync(%sInvokeOptions? options = null)\n", typeParameter, argsParamDef) - fmt.Fprintf(w, " => Pulumi.Deployment.Instance.InvokeAsync%s(\"%s\", %s, options.WithVersion());\n", + fmt.Fprintf(w, " => Pulumi.Deployment.Instance.InvokeAsync%s(\"%s\", %s, options.WithDefaults());\n", typeParameter, fun.Token, argsParamRef) + // Emit the Output method if needed. + err := mod.genFunctionOutputVersion(w, fun) + if err != nil { + return err + } + // Close the class. 
fmt.Fprintf(w, " }\n") @@ -1080,6 +1399,12 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) error { return err } } + + err = mod.genFunctionOutputVersionTypes(w, fun) + if err != nil { + return err + } + if fun.Outputs != nil { fmt.Fprintf(w, "\n") @@ -1097,6 +1422,61 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) error { return nil } +func functionOutputVersionArgsTypeName(fun *schema.Function) string { + className := tokenToFunctionName(fun.Token) + return fmt.Sprintf("%sInvokeArgs", className) +} + +// Generates `${fn}Output(..)` version lifted to work on +// `Input`-wrapped arguments and producing an `Output`-wrapped result. +func (mod *modContext) genFunctionOutputVersion(w io.Writer, fun *schema.Function) error { + if !fun.NeedsOutputVersion() { + return nil + } + className := tokenToFunctionName(fun.Token) + + var argsDefault, sigil string + if allOptionalInputs(fun) { + // If the number of required input properties was zero, we can make the args object optional. + argsDefault, sigil = " = null", "?" + } + + argsTypeName := functionOutputVersionArgsTypeName(fun) + outputArgsParamDef := fmt.Sprintf("%s%s args%s, ", argsTypeName, sigil, argsDefault) + outputArgsParamRef := fmt.Sprintf("args ?? new %s()", argsTypeName) + + fmt.Fprintf(w, "\n") + + // Emit the doc comment, if any. + printComment(w, fun.Comment, " ") + fmt.Fprintf(w, " public static Output<%sResult> Invoke(%sInvokeOptions? options = null)\n", + className, outputArgsParamDef) + fmt.Fprintf(w, " => Pulumi.Deployment.Instance.Invoke<%sResult>(\"%s\", %s, options.WithDefaults());\n", + className, fun.Token, outputArgsParamRef) + return nil +} + +// Generate helper type definitions referred to in `genFunctionOutputVersion`. 
+func (mod *modContext) genFunctionOutputVersionTypes(w io.Writer, fun *schema.Function) error { + if !fun.NeedsOutputVersion() || fun.Inputs == nil { + return nil + } + + applyArgs := &plainType{ + mod: mod, + name: functionOutputVersionArgsTypeName(fun), + propertyTypeQualifier: "Inputs", + baseClass: "InvokeArgs", + properties: fun.Inputs.InputShape.Properties, + args: true, + } + + if err := applyArgs.genInputTypeWithFlags(w, 1, true /* generateInputAttributes */); err != nil { + return err + } + return nil +} + func (mod *modContext) genEnums(w io.Writer, enums []*schema.EnumType) error { // Open the namespace. fmt.Fprintf(w, "namespace %s\n", mod.namespaceName) @@ -1145,7 +1525,7 @@ func (mod *modContext) genEnum(w io.Writer, enum *schema.EnumType) error { // Print documentation comment printComment(w, enum.Comment, indent) - underlyingType := mod.typeString(enum.ElementType, "", false, false, false, false, false, false) + underlyingType := mod.typeString(enum.ElementType, "", false, false, false) switch enum.ElementType { case schema.StringType, schema.NumberType: // EnumType attribute @@ -1244,15 +1624,17 @@ func (mod *modContext) genEnum(w io.Writer, enum *schema.EnumType) error { return nil } -func visitObjectTypes(properties []*schema.Property, visitor func(*schema.ObjectType, bool)) { - codegen.VisitTypeClosure(properties, func(t codegen.Type) { - if o, ok := t.Type.(*schema.ObjectType); ok { - visitor(o, t.Plain) +func visitObjectTypes(properties []*schema.Property, visitor func(*schema.ObjectType)) { + codegen.VisitTypeClosure(properties, func(t schema.Type) { + if o, ok := t.(*schema.ObjectType); ok { + visitor(o) } }) } -func (mod *modContext) genType(w io.Writer, obj *schema.ObjectType, propertyTypeQualifier string, input, state, args bool, level int) error { +func (mod *modContext) genType(w io.Writer, obj *schema.ObjectType, propertyTypeQualifier string, input, state bool, level int) error { + args := obj.IsInputShape() + pt := &plainType{ mod: 
mod, name: mod.typeName(obj, state, input, args), @@ -1276,21 +1658,37 @@ func (mod *modContext) genType(w io.Writer, obj *schema.ObjectType, propertyType } // pulumiImports is a slice of common imports that are used with the genHeader method. -var pulumiImports = []string{ - "System", - "System.Collections.Generic", - "System.Collections.Immutable", - "System.Threading.Tasks", - "Pulumi.Serialization", +func (mod *modContext) pulumiImports() []string { + var pulumiImports = []string{ + "System", + "System.Collections.Generic", + "System.Collections.Immutable", + "System.Threading.Tasks", + "Pulumi.Serialization", + } + if mod.RootNamespace() != "Pulumi" { + pulumiImports = append(pulumiImports, "Pulumi") + } + return pulumiImports } func (mod *modContext) getTypeImports(t schema.Type, recurse bool, imports map[string]codegen.StringSet, seen codegen.Set) { + mod.getTypeImportsForResource(t, recurse, imports, seen, nil) +} + +func (mod *modContext) getTypeImportsForResource(t schema.Type, recurse bool, imports map[string]codegen.StringSet, seen codegen.Set, res *schema.Resource) { if seen.Has(t) { return } seen.Add(t) switch t := t.(type) { + case *schema.OptionalType: + mod.getTypeImports(t.ElementType, recurse, imports, seen) + return + case *schema.InputType: + mod.getTypeImports(t.ElementType, recurse, imports, seen) + return case *schema.ArrayType: mod.getTypeImports(t.ElementType, recurse, imports, seen) return @@ -1309,6 +1707,11 @@ func (mod *modContext) getTypeImports(t schema.Type, recurse bool, imports map[s return } + // Don't import itself. 
+ if t.Resource == res { + return + } + modName, name, modPath := mod.pkg.TokenToModule(t.Token), tokenToName(t.Token), "" if modName != mod.mod { mp, err := filepath.Rel(mod.mod, modName) @@ -1339,6 +1742,10 @@ func (mod *modContext) getTypeImports(t schema.Type, recurse bool, imports map[s } func (mod *modContext) getImports(member interface{}, imports map[string]codegen.StringSet) { + mod.getImportsForResource(member, imports, nil) +} + +func (mod *modContext) getImportsForResource(member interface{}, imports map[string]codegen.StringSet, res *schema.Resource) { seen := codegen.Set{} switch member := member.(type) { case *schema.ObjectType: @@ -1351,10 +1758,22 @@ func (mod *modContext) getImports(member interface{}, imports map[string]codegen return case *schema.Resource: for _, p := range member.Properties { - mod.getTypeImports(p.Type, false, imports, seen) + mod.getTypeImportsForResource(p.Type, false, imports, seen, res) } for _, p := range member.InputProperties { - mod.getTypeImports(p.Type, false, imports, seen) + mod.getTypeImportsForResource(p.Type, false, imports, seen, res) + } + for _, method := range member.Methods { + if method.Function.Inputs != nil { + for _, p := range method.Function.Inputs.Properties { + mod.getTypeImportsForResource(p.Type, false, imports, seen, res) + } + } + if method.Function.Outputs != nil { + for _, p := range method.Function.Outputs.Properties { + mod.getTypeImportsForResource(p.Type, false, imports, seen, res) + } + } } return case *schema.Function: @@ -1389,8 +1808,9 @@ func (mod *modContext) genHeader(w io.Writer, using []string) { } func (mod *modContext) getConfigProperty(schemaType schema.Type) (string, string) { - propertyType := mod.typeString( - schemaType, "Types", false, false, false /*wrapInputs*/, false /*args*/, false /*requireInitializers*/, false) + schemaType = codegen.UnwrapType(schemaType) + + propertyType := mod.typeString(schemaType, "Types", false, false, false /*requireInitializers*/) var getFunc 
string nullableSigil := "?" @@ -1422,7 +1842,7 @@ func (mod *modContext) getConfigProperty(schemaType schema.Type) (string, string func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { w := &bytes.Buffer{} - mod.genHeader(w, []string{"System.Collections.Immutable"}) + mod.genHeader(w, []string{"System", "System.Collections.Immutable"}) // Use the root namespace to avoid `Pulumi.Provider.Config.Config.VarName` usage. fmt.Fprintf(w, "namespace %s\n", mod.namespaceName) fmt.Fprintf(w, "{\n") @@ -1431,8 +1851,32 @@ func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { fmt.Fprintf(w, " public static class Config\n") fmt.Fprintf(w, " {\n") + fmt.Fprintf(w, " [System.Diagnostics.CodeAnalysis.SuppressMessage(\"Microsoft.Design\", \"IDE1006\", Justification = \n") + fmt.Fprintf(w, " \"Double underscore prefix used to avoid conflicts with variable names.\")]\n") + fmt.Fprintf(w, " private sealed class __Value\n") + fmt.Fprintf(w, " {\n") + + fmt.Fprintf(w, " private readonly Func _getter;\n") + fmt.Fprintf(w, " private T _value = default!;\n") + fmt.Fprintf(w, " private bool _set;\n") + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, " public __Value(Func getter)\n") + fmt.Fprintf(w, " {\n") + fmt.Fprintf(w, " _getter = getter;\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, " public T Get() => _set ? _value : _getter();\n") + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, " public void Set(T value)\n") + fmt.Fprintf(w, " {\n") + fmt.Fprintf(w, " _value = value;\n") + fmt.Fprintf(w, " _set = true;\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "\n") + // Create a config bag for the variables to pull from. - fmt.Fprintf(w, " private static readonly Pulumi.Config __config = new Pulumi.Config(\"%v\");", mod.pkg.Name) + fmt.Fprintf(w, " private static readonly Pulumi.Config __config = new Pulumi.Config(\"%v\");\n", mod.pkg.Name) fmt.Fprintf(w, "\n") // Emit an entry for all config variables. 
@@ -1450,8 +1894,13 @@ func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { initializer += " ?? " + dv } + fmt.Fprintf(w, " private static readonly __Value<%[1]s> _%[2]s = new __Value<%[1]s>(() => %[3]s);\n", propertyType, p.Name, initializer) printComment(w, p.Comment, " ") - fmt.Fprintf(w, " public static %s %s { get; set; } = %s;\n", propertyType, propertyName, initializer) + fmt.Fprintf(w, " public static %s %s\n", propertyType, propertyName) + fmt.Fprintf(w, " {\n") + fmt.Fprintf(w, " get => _%s.Get();\n", p.Name) + fmt.Fprintf(w, " set => _%s.Set(value);\n", p.Name) + fmt.Fprintf(w, " }\n") fmt.Fprintf(w, "\n") } @@ -1461,6 +1910,11 @@ func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { fmt.Fprintf(w, " {\n") for _, typ := range mod.types { + // Ignore input-shaped types. + if typ.IsInputShape() { + continue + } + fmt.Fprintf(w, "\n") // Open the class. @@ -1470,10 +1924,10 @@ func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { // Generate each output field. for _, prop := range typ.Properties { name := mod.propertyName(prop) - typ := mod.typeString(prop.Type, "Types", false, false, false /*wrapInput*/, false /*args*/, false, !prop.IsRequired) + typ := mod.typeString(prop.Type, "Types", false, false, false) initializer := "" - if !prop.IsRequired && !isValueType(prop.Type) && !isImmutableArrayType(prop.Type, false) { + if !prop.IsRequired() && !isValueType(prop.Type) && !isImmutableArrayType(codegen.UnwrapType(prop.Type), false) { initializer = " = null!;" } @@ -1509,10 +1963,11 @@ func (mod *modContext) genUtilities() (string, error) { // Strip any 'v' off of the version. 
w := &bytes.Buffer{} err := csharpUtilitiesTemplate.Execute(w, csharpUtilitiesTemplateContext{ - Name: namespaceName(mod.namespaces, mod.pkg.Name), - Namespace: mod.namespaceName, - ClassName: "Utilities", - Tool: mod.tool, + Name: namespaceName(mod.namespaces, mod.pkg.Name), + Namespace: mod.namespaceName, + ClassName: "Utilities", + Tool: mod.tool, + PluginDownloadURL: mod.pkg.PluginDownloadURL, }) if err != nil { return "", err @@ -1578,8 +2033,13 @@ func (mod *modContext) gen(fs fs) error { // Resources for _, r := range mod.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + imports := map[string]codegen.StringSet{} - mod.getImports(r, imports) + mod.getImportsForResource(r, imports, r) buffer := &bytes.Buffer{} var additionalImports []string @@ -1587,7 +2047,7 @@ func (mod *modContext) gen(fs fs) error { additionalImports = append(additionalImports, i.SortedValues()...) } sort.Strings(additionalImports) - importStrings := pulumiImports + importStrings := mod.pulumiImports() importStrings = append(importStrings, additionalImports...) mod.genHeader(buffer, importStrings) @@ -1600,65 +2060,63 @@ func (mod *modContext) gen(fs fs) error { // Functions for _, f := range mod.functions { - imports := map[string]codegen.StringSet{} - mod.getImports(f, imports) - - buffer := &bytes.Buffer{} - importStrings := pulumiImports - for _, i := range imports { - importStrings = append(importStrings, i.SortedValues()...) + if f.IsOverlay { + // This function code is generated by the provider, so no further action is required. 
+ continue } - mod.genHeader(buffer, importStrings) - if err := mod.genFunction(buffer, f); err != nil { + code, err := mod.genFunctionFileCode(f) + if err != nil { return err } - - addFile(tokenToName(f.Token)+".cs", buffer.String()) + addFile(tokenToName(f.Token)+".cs", code) } // Nested types for _, t := range mod.types { + if t.IsOverlay { + // This type is generated by the provider, so no further action is required. + continue + } + if mod.details(t).inputType { buffer := &bytes.Buffer{} - mod.genHeader(buffer, pulumiImports) + mod.genHeader(buffer, mod.pulumiImports()) fmt.Fprintf(buffer, "namespace %s\n", mod.tokenToNamespace(t.Token, "Inputs")) fmt.Fprintf(buffer, "{\n") - if mod.details(t).argsType { - if err := mod.genType(buffer, t, "Inputs", true, false, true, 1); err != nil { - return err - } - } - if mod.details(t).plainType { - if err := mod.genType(buffer, t, "Inputs", true, false, false, 1); err != nil { - return err - } + + if err := mod.genType(buffer, t, "Inputs", true, false, 1); err != nil { + return err } + fmt.Fprintf(buffer, "}\n") - addFile(path.Join("Inputs", tokenToName(t.Token)+"Args.cs"), buffer.String()) + name := tokenToName(t.Token) + if t.IsInputShape() { + name += "Args" + } + addFile(path.Join("Inputs", name+".cs"), buffer.String()) } if mod.details(t).stateType { buffer := &bytes.Buffer{} - mod.genHeader(buffer, pulumiImports) + mod.genHeader(buffer, mod.pulumiImports()) fmt.Fprintf(buffer, "namespace %s\n", mod.tokenToNamespace(t.Token, "Inputs")) fmt.Fprintf(buffer, "{\n") - if err := mod.genType(buffer, t, "Inputs", true, true, true, 1); err != nil { + if err := mod.genType(buffer, t, "Inputs", true, true, 1); err != nil { return err } fmt.Fprintf(buffer, "}\n") - addFile(path.Join("Inputs", tokenToName(t.Token)+"GetArgs.cs"), buffer.String()) } if mod.details(t).outputType { buffer := &bytes.Buffer{} - mod.genHeader(buffer, pulumiImports) + mod.genHeader(buffer, mod.pulumiImports()) fmt.Fprintf(buffer, "namespace %s\n", 
mod.tokenToNamespace(t.Token, "Outputs")) fmt.Fprintf(buffer, "{\n") - if err := mod.genType(buffer, t, "Outputs", false, false, false, 1); err != nil { + if err := mod.genType(buffer, t, "Outputs", false, false, 1); err != nil { return err } fmt.Fprintf(buffer, "}\n") @@ -1667,7 +2125,6 @@ func (mod *modContext) gen(fs fs) error { if (mod.isTFCompatMode() || mod.isK8sCompatMode()) && mod.details(t).plainType { suffix = "Result" } - addFile(path.Join("Outputs", tokenToName(t.Token)+suffix+".cs"), buffer.String()) } } @@ -1687,8 +2144,13 @@ func (mod *modContext) gen(fs fs) error { } // genPackageMetadata generates all the non-code metadata required by a Pulumi package. -func genPackageMetadata(pkg *schema.Package, assemblyName string, packageReferences map[string]string, files fs) error { - projectFile, err := genProjectFile(pkg, assemblyName, packageReferences) +func genPackageMetadata(pkg *schema.Package, + assemblyName string, + packageReferences map[string]string, + projectReferences []string, + files fs) error { + + projectFile, err := genProjectFile(pkg, assemblyName, packageReferences, projectReferences) if err != nil { return err } @@ -1697,18 +2159,41 @@ func genPackageMetadata(pkg *schema.Package, assemblyName string, packageReferen return err } + pulumiPlugin := &plugin.PulumiPluginJSON{ + Resource: true, + Name: pkg.Name, + Server: pkg.PluginDownloadURL, + } + + lang, ok := pkg.Language["csharp"].(CSharpPackageInfo) + if pkg.Version != nil && ok && lang.RespectSchemaVersion { + files.add("version.txt", []byte(pkg.Version.String())) + pulumiPlugin.Version = pkg.Version.String() + } + + plugin, err := (pulumiPlugin).JSON() + if err != nil { + return err + } + files.add(assemblyName+".csproj", projectFile) files.add("logo.png", logo) + files.add("pulumi-plugin.json", plugin) return nil } // genProjectFile emits a C# project file into the configured output directory. 
-func genProjectFile(pkg *schema.Package, assemblyName string, packageReferences map[string]string) ([]byte, error) { +func genProjectFile(pkg *schema.Package, + assemblyName string, + packageReferences map[string]string, + projectReferences []string) ([]byte, error) { + w := &bytes.Buffer{} err := csharpProjectFileTemplate.Execute(w, csharpProjectFileTemplateContext{ XMLDoc: fmt.Sprintf(`.\%s.xml`, assemblyName), Package: pkg, PackageReferences: packageReferences, + ProjectReferences: projectReferences, }) if err != nil { return nil, err @@ -1772,6 +2257,11 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod computePropertyNames(pkg.Config, propertyNames) computePropertyNames(pkg.Provider.InputProperties, propertyNames) for _, r := range pkg.Resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + computePropertyNames(r.Properties, propertyNames) computePropertyNames(r.InputProperties, propertyNames) if r.StateInputs != nil { @@ -1779,6 +2269,11 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod } } for _, f := range pkg.Functions { + if f.IsOverlay { + // This function code is generated by the provider, so no further action is required. + continue + } + if f.Inputs != nil { computePropertyNames(f.Inputs.Properties, propertyNames) } @@ -1801,20 +2296,22 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod mod, ok := modules[modName] if !ok { info := getPackageInfo(p) - ns := "Pulumi." + namespaceName(info.Namespaces, pkg.Name) + ns := info.GetRootNamespace() + "." + namespaceName(info.Namespaces, pkg.Name) if modName != "" { ns += "." 
+ namespaceName(info.Namespaces, modName) } mod = &modContext{ - pkg: p, - mod: modName, - tool: tool, - namespaceName: ns, - namespaces: info.Namespaces, - typeDetails: details, - propertyNames: propertyNames, - compatibility: info.Compatibility, - dictionaryConstructors: info.DictionaryConstructors, + pkg: p, + mod: modName, + tool: tool, + namespaceName: ns, + namespaces: info.Namespaces, + rootNamespace: info.GetRootNamespace(), + typeDetails: details, + propertyNames: propertyNames, + compatibility: info.Compatibility, + dictionaryConstructors: info.DictionaryConstructors, + liftSingleValueMethodReturns: info.LiftSingleValueMethodReturns, } if modName != "" { @@ -1842,10 +2339,10 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod // Create the config module if necessary. if len(pkg.Config) > 0 { cfg := getMod("config", pkg) - cfg.namespaceName = "Pulumi." + namespaceName(infos[pkg].Namespaces, pkg.Name) + cfg.namespaceName = fmt.Sprintf("%s.%s", cfg.RootNamespace(), namespaceName(infos[pkg].Namespaces, pkg.Name)) } - visitObjectTypes(pkg.Config, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(pkg.Config, func(t *schema.ObjectType) { getModFromToken(t.Token, pkg).details(t).outputType = true }) @@ -1853,22 +2350,14 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod scanResource := func(r *schema.Resource) { mod := getModFromToken(r.Token, pkg) mod.resources = append(mod.resources, r) - visitObjectTypes(r.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(r.Properties, func(t *schema.ObjectType) { getModFromToken(t.Token, t.Package).details(t).outputType = true }) - visitObjectTypes(r.InputProperties, func(t *schema.ObjectType, plain bool) { - if r.IsProvider { - getModFromToken(t.Token, t.Package).details(t).outputType = true - } + visitObjectTypes(r.InputProperties, func(t *schema.ObjectType) { getModFromToken(t.Token, t.Package).details(t).inputType = true - if plain { 
- getModFromToken(t.Token, t.Package).details(t).plainType = true - } else { - getModFromToken(t.Token, t.Package).details(t).argsType = true - } }) if r.StateInputs != nil { - visitObjectTypes(r.StateInputs.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(r.StateInputs.Properties, func(t *schema.ObjectType) { getModFromToken(t.Token, t.Package).details(t).inputType = true getModFromToken(t.Token, t.Package).details(t).stateType = true }) @@ -1882,17 +2371,31 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod // Find input and output types referenced by functions. for _, f := range pkg.Functions { + if f.IsOverlay { + // This function code is generated by the provider, so no further action is required. + continue + } + mod := getModFromToken(f.Token, pkg) - mod.functions = append(mod.functions, f) + if !f.IsMethod { + mod.functions = append(mod.functions, f) + } if f.Inputs != nil { - visitObjectTypes(f.Inputs.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(f.Inputs.Properties, func(t *schema.ObjectType) { details := getModFromToken(t.Token, t.Package).details(t) details.inputType = true details.plainType = true }) + if f.NeedsOutputVersion() { + visitObjectTypes(f.Inputs.InputShape.Properties, func(t *schema.ObjectType) { + details := getModFromToken(t.Token, t.Package).details(t) + details.inputType = true + details.usedInFunctionOutputVersionInputs = true + }) + } } if f.Outputs != nil { - visitObjectTypes(f.Outputs.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(f.Outputs.Properties, func(t *schema.ObjectType) { details := getModFromToken(t.Token, t.Package).details(t) details.outputType = true details.plainType = true @@ -1907,8 +2410,10 @@ func generateModuleContextMap(tool string, pkg *schema.Package) (map[string]*mod mod := getModFromToken(typ.Token, pkg) mod.types = append(mod.types, typ) case *schema.EnumType: - mod := getModFromToken(typ.Token, pkg) - mod.enums = 
append(mod.enums, typ) + if !typ.IsOverlay { + mod := getModFromToken(typ.Token, pkg) + mod.enums = append(mod.enums, typ) + } default: continue } @@ -1931,6 +2436,11 @@ func LanguageResources(tool string, pkg *schema.Package) (map[string]LanguageRes continue } for _, r := range mod.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + lr := LanguageResource{ Resource: r, Package: namespaceName(info.Namespaces, modName), @@ -1949,7 +2459,7 @@ func GeneratePackage(tool string, pkg *schema.Package, extraFiles map[string][]b return nil, err } - assemblyName := "Pulumi." + namespaceName(info.Namespaces, pkg.Name) + assemblyName := info.GetRootNamespace() + "." + namespaceName(info.Namespaces, pkg.Name) // Generate each module. files := fs{} @@ -1964,7 +2474,12 @@ func GeneratePackage(tool string, pkg *schema.Package, extraFiles map[string][]b } // Finally emit the package metadata. - if err := genPackageMetadata(pkg, assemblyName, info.PackageReferences, files); err != nil { + if err := genPackageMetadata(pkg, + assemblyName, + info.PackageReferences, + info.ProjectReferences, + files); err != nil { + return nil, err } return files, nil diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program.go index 3d5cef1..0680514 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -22,10 +22,10 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -33,7 +33,7 @@ import ( type generator struct { // The formatter to use when generating code. *format.Formatter - program *hcl2.Program + program *pcl.Program // C# namespace map per package. namespaces map[string]map[string]string // C# codegen compatibility mode per package. @@ -46,13 +46,16 @@ type generator struct { asyncInit bool configCreated bool diagnostics hcl.Diagnostics + // Helper map to emit custom type name suffixes that match + // those emitted by codegen. + usedInFunctionOutputVersionInputs map[schema.Type]bool } const pulumiPackage = "pulumi" -func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, error) { +func GenerateProgram(program *pcl.Program) (map[string][]byte, hcl.Diagnostics, error) { // Linearize the nodes into an order appropriate for procedural code generation. - nodes := hcl2.Linearize(program) + nodes := pcl.Linearize(program) // Import C#-specific schema info. 
namespaces := make(map[string]map[string]string) @@ -64,7 +67,10 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, return make(map[string][]byte), nil, err } - csharpInfo := p.Language["csharp"].(CSharpPackageInfo) + csharpInfo, hasInfo := p.Language["csharp"].(CSharpPackageInfo) + if !hasInfo { + csharpInfo = CSharpPackageInfo{} + } packageNamespaces := csharpInfo.Namespaces namespaces[p.Name] = packageNamespaces compatibilities[p.Name] = csharpInfo.Compatibility @@ -87,7 +93,7 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, g.Formatter = format.NewFormatter(g) for _, n := range nodes { - if r, ok := n.(*hcl2.Resource); ok && requiresAsyncInit(r) { + if r, ok := n.(*pcl.Resource); ok && requiresAsyncInit(r) { g.asyncInit = true break } @@ -138,17 +144,24 @@ func (g *generator) genComment(w io.Writer, comment syntax.Comment) { } // genPreamble generates using statements, class definition and constructor. -func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { +func (g *generator) genPreamble(w io.Writer, program *pcl.Program) { // Accumulate other using statements for the various providers and packages. Don't emit them yet, as we need // to sort them later on. 
systemUsings := codegen.NewStringSet() pulumiUsings := codegen.NewStringSet() + preambleHelperMethods := codegen.NewStringSet() for _, n := range program.Nodes { - if r, isResource := n.(*hcl2.Resource); isResource { + if r, isResource := n.(*pcl.Resource); isResource { pkg, _, _, _ := r.DecomposeToken() if pkg != pulumiPackage { namespace := namespaceName(g.namespaces[pkg], pkg) - pulumiUsings.Add(fmt.Sprintf("%s = Pulumi.%[1]s", namespace)) + var info CSharpPackageInfo + if r.Schema != nil && r.Schema.Package != nil { + if csharpinfo, ok := r.Schema.Package.Language["csharp"].(CSharpPackageInfo); ok { + info = csharpinfo + } + } + pulumiUsings.Add(fmt.Sprintf("%s = %[2]s.%[1]s", namespace, info.GetRootNamespace())) } if r.Options != nil && r.Options.Range != nil { systemUsings.Add("System.Collections.Generic") @@ -163,6 +176,11 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { pulumiUsings.Add(i) } } + + // Checking to see if this function call deserves its own dedicated helper method in the preamble + if helperMethodBody, ok := getHelperMethodIfNeeded(call.Name); ok { + preambleHelperMethods.Add(helperMethodBody) + } } if _, ok := n.(*model.SplatExpression); ok { systemUsings.Add("System.Linq") @@ -189,18 +207,24 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { // Emit Stack class signature g.Fprint(w, "class MyStack : Stack\n") g.Fprint(w, "{\n") + + // If we collected any helper methods that should be added, write them just before the main func + for _, preambleHelperMethodBody := range preambleHelperMethods.SortedValues() { + g.Fprintf(w, "\t%s\n\n", preambleHelperMethodBody) + } + g.Fprint(w, " public MyStack()\n") g.Fprint(w, " {\n") } // genInitialize generates the declaration and the call to the async Initialize method, and also fills stack // outputs from the initialization result. 
-func (g *generator) genInitialize(w io.Writer, nodes []hcl2.Node) { +func (g *generator) genInitialize(w io.Writer, nodes []pcl.Node) { g.Indented(func() { g.Fgenf(w, "%svar dict = Output.Create(Initialize());\n", g.Indent) for _, n := range nodes { switch n := n.(type) { - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.Fprintf(w, "%sthis.%s = dict.Apply(dict => dict[\"%s\"]);\n", g.Indent, propertyName(n.Name()), makeValidIdentifier(n.Name())) } @@ -212,7 +236,7 @@ func (g *generator) genInitialize(w io.Writer, nodes []hcl2.Node) { } // genPostamble closes the method and the class and declares stack output statements. -func (g *generator) genPostamble(w io.Writer, nodes []hcl2.Node) { +func (g *generator) genPostamble(w io.Writer, nodes []pcl.Node) { g.Indented(func() { // Return outputs from Initialize if needed if g.asyncInit { @@ -222,7 +246,7 @@ func (g *generator) genPostamble(w io.Writer, nodes []hcl2.Node) { g.Indented(func() { for _, n := range nodes { switch n := n.(type) { - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.Fgenf(w, "%s{ \"%s\", %[2]s },\n", g.Indent, n.Name()) } } @@ -237,7 +261,7 @@ func (g *generator) genPostamble(w io.Writer, nodes []hcl2.Node) { // Emit stack output properties for _, n := range nodes { switch n := n.(type) { - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.genOutputProperty(w, n) } } @@ -245,22 +269,22 @@ func (g *generator) genPostamble(w io.Writer, nodes []hcl2.Node) { g.Fprint(w, "}\n") } -func (g *generator) genNode(w io.Writer, n hcl2.Node) { +func (g *generator) genNode(w io.Writer, n pcl.Node) { switch n := n.(type) { - case *hcl2.Resource: + case *pcl.Resource: g.genResource(w, n) - case *hcl2.ConfigVariable: + case *pcl.ConfigVariable: g.genConfigVariable(w, n) - case *hcl2.LocalVariable: + case *pcl.LocalVariable: g.genLocalVariable(w, n) - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.genOutputAssignment(w, n) } } // requiresAsyncInit returns true if the program 
requires awaits in the code, and therefore an asynchronous // method must be declared. -func requiresAsyncInit(r *hcl2.Resource) bool { +func requiresAsyncInit(r *pcl.Resource) bool { if r.Options == nil || r.Options.Range == nil { return false } @@ -269,7 +293,7 @@ func requiresAsyncInit(r *hcl2.Resource) bool { } // resourceTypeName computes the C# class name for the given resource. -func (g *generator) resourceTypeName(r *hcl2.Resource) string { +func (g *generator) resourceTypeName(r *pcl.Resource) string { // Compute the resource type from the Pulumi type token. pkg, module, member, diags := r.DecomposeToken() contract.Assert(len(diags) == 0) @@ -290,7 +314,7 @@ func (g *generator) resourceTypeName(r *hcl2.Resource) string { } // resourceArgsTypeName computes the C# arguments class name for the given resource. -func (g *generator) resourceArgsTypeName(r *hcl2.Resource) string { +func (g *generator) resourceArgsTypeName(r *pcl.Resource) string { // Compute the resource type from the Pulumi type token. pkg, module, member, diags := r.DecomposeToken() contract.Assert(len(diags) == 0) @@ -318,7 +342,7 @@ func (g *generator) functionName(tokenArg model.Expression) (string, string) { tokenRange := tokenArg.SyntaxNode().Range() // Compute the resource type from the Pulumi type token. 
- pkg, module, member, diags := hcl2.DecomposeToken(token, tokenRange) + pkg, module, member, diags := pcl.DecomposeToken(token, tokenRange) contract.Assert(len(diags) == 0) namespaces := g.namespaces[pkg] rootNamespace := namespaceName(namespaces, pkg) @@ -331,9 +355,29 @@ func (g *generator) functionName(tokenArg model.Expression) (string, string) { return rootNamespace, fmt.Sprintf("%s%s.%s", rootNamespace, namespace, Title(member)) } +func (g *generator) toSchemaType(destType model.Type) (schema.Type, bool) { + schemaType, ok := pcl.GetSchemaForType(destType.(model.Type)) + if !ok { + return nil, false + } + return codegen.UnwrapType(schemaType), true +} + // argumentTypeName computes the C# argument class name for the given expression and model type. func (g *generator) argumentTypeName(expr model.Expression, destType model.Type) string { - schemaType, ok := hcl2.GetSchemaForType(destType.(model.Type)) + schemaType, ok := g.toSchemaType(destType) + if !ok { + return "" + } + suffix := "Args" + if g.usedInFunctionOutputVersionInputs[schemaType] { + suffix = "InputArgs" + } + return g.argumentTypeNameWithSuffix(expr, destType, suffix) +} + +func (g *generator) argumentTypeNameWithSuffix(expr model.Expression, destType model.Type, suffix string) string { + schemaType, ok := g.toSchemaType(destType) if !ok { return "" } @@ -351,7 +395,7 @@ func (g *generator) argumentTypeName(expr model.Expression, destType model.Type) qualifier = "" } - pkg, _, member, diags := hcl2.DecomposeToken(token, tokenRange) + pkg, _, member, diags := pcl.DecomposeToken(token, tokenRange) contract.Assert(len(diags) == 0) module := g.tokenToModules[pkg](token) namespaces := g.namespaces[pkg] @@ -368,7 +412,7 @@ func (g *generator) argumentTypeName(expr model.Expression, destType model.Type) } else if qualifier != "" { namespace = namespace + "." 
+ qualifier } - member = member + "Args" + member = member + suffix return fmt.Sprintf("%s%s.%s", rootNamespace, namespace, Title(member)) } @@ -382,7 +426,7 @@ func (g *generator) makeResourceName(baseName, count string) string { return fmt.Sprintf("$\"%s-{%s}\"", baseName, count) } -func (g *generator) genResourceOptions(opts *hcl2.ResourceOptions) string { +func (g *generator) genResourceOptions(opts *pcl.ResourceOptions) string { if opts == nil { return "" } @@ -424,7 +468,7 @@ func (g *generator) genResourceOptions(opts *hcl2.ResourceOptions) string { } // genResource handles the generation of instantiations of non-builtin resources. -func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { +func (g *generator) genResource(w io.Writer, r *pcl.Resource) { qualifiedMemberName := g.resourceTypeName(r) argsName := g.resourceArgsTypeName(r) @@ -493,7 +537,7 @@ func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { g.genTrivia(w, r.Definition.Tokens.GetCloseBrace()) } -func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { +func (g *generator) genConfigVariable(w io.Writer, v *pcl.ConfigVariable) { if !g.configCreated { g.Fprintf(w, "%svar config = new Config();\n", g.Indent) g.configCreated = true @@ -529,13 +573,13 @@ func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { g.Fgenf(w, ";\n") } -func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { +func (g *generator) genLocalVariable(w io.Writer, v *pcl.LocalVariable) { // TODO(pdg): trivia expr := g.lowerExpression(v.Definition.Value, v.Type()) g.Fgenf(w, "%svar %s = %.3v;\n", g.Indent, makeValidIdentifier(v.Name()), expr) } -func (g *generator) genOutputAssignment(w io.Writer, v *hcl2.OutputVariable) { +func (g *generator) genOutputAssignment(w io.Writer, v *pcl.OutputVariable) { if g.asyncInit { g.Fgenf(w, "%svar %s", g.Indent, makeValidIdentifier(v.Name())) } else { @@ -544,7 +588,7 @@ func (g *generator) genOutputAssignment(w 
io.Writer, v *hcl2.OutputVariable) { g.Fgenf(w, " = %.3v;\n", g.lowerExpression(v.Value, v.Type())) } -func (g *generator) genOutputProperty(w io.Writer, v *hcl2.OutputVariable) { +func (g *generator) genOutputProperty(w io.Writer, v *pcl.OutputVariable) { // TODO(pdg): trivia g.Fgenf(w, "%s[Output(\"%s\")]\n", g.Indent, v.Name()) // TODO(msh): derive the element type of the Output from the type of its value. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program_expressions.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program_expressions.go index d2fd971..ca90325 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program_expressions.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/gen_program_expressions.go @@ -15,7 +15,6 @@ package dotnet import ( - "bytes" "fmt" "io" "math/big" @@ -23,8 +22,8 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" @@ -38,10 +37,10 @@ func (nameInfo) Format(name string) string { // lowerExpression amends the expression with intrinsics for C# generation. 
func (g *generator) lowerExpression(expr model.Expression, typ model.Type) model.Expression { - expr = hcl2.RewritePropertyReferences(expr) - expr, diags := hcl2.RewriteApplies(expr, nameInfo(0), !g.asyncInit) + expr = pcl.RewritePropertyReferences(expr) + expr, diags := pcl.RewriteApplies(expr, nameInfo(0), !g.asyncInit) contract.Assert(len(diags) == 0) - expr = hcl2.RewriteConversions(expr, typ) + expr = pcl.RewriteConversions(expr, typ) if g.asyncInit { expr = g.awaitInvokes(expr) } else { @@ -60,7 +59,7 @@ func (g *generator) outputInvokes(x model.Expression) model.Expression { rewriter := func(x model.Expression) (model.Expression, hcl.Diagnostics) { // Ignore the node if it is not a call to invoke. call, ok := x.(*model.FunctionCallExpression) - if !ok || call.Name != hcl2.Invoke { + if !ok || call.Name != pcl.Invoke { return x, nil } @@ -90,7 +89,7 @@ func (g *generator) awaitInvokes(x model.Expression) model.Expression { rewriter := func(x model.Expression) (model.Expression, hcl.Diagnostics) { // Ignore the node if it is not a call to invoke. call, ok := x.(*model.FunctionCallExpression) - if !ok || call.Name != hcl2.Invoke { + if !ok || call.Name != pcl.Invoke { return x, nil } @@ -215,7 +214,7 @@ func (g *generator) GenForExpression(w io.Writer, expr *model.ForExpression) { func (g *generator) genApply(w io.Writer, expr *model.FunctionCallExpression) { // Extract the list of outputs and the continuation expression from the `__apply` arguments. 
- applyArgs, then := hcl2.ParseApplyCall(expr) + applyArgs, then := pcl.ParseApplyCall(expr) if len(applyArgs) == 1 { // If we only have a single output, just generate a normal `.Apply` @@ -239,13 +238,17 @@ func (g *generator) genRange(w io.Writer, call *model.FunctionCallExpression, en } var functionNamespaces = map[string][]string{ - "readDir": {"System.IO", "System.Linq"}, - "readFile": {"System.IO"}, - "toJSON": {"System.Text.Json", "System.Collections.Generic"}, + "readDir": {"System.IO", "System.Linq"}, + "readFile": {"System.IO"}, + "filebase64": {"System", "System.IO"}, + "filebase64sha256": {"System", "System.IO", "System.Security.Cryptography", "System.Text"}, + "toJSON": {"System.Text.Json", "System.Collections.Generic"}, + "toBase64": {"System"}, + "sha1": {"System.Security.Cryptography", "System.Text"}, } func (g *generator) genFunctionUsings(x *model.FunctionCallExpression) []string { - if x.Name != hcl2.Invoke { + if x.Name != pcl.Invoke { return functionNamespaces[x.Name] } @@ -253,16 +256,38 @@ func (g *generator) genFunctionUsings(x *model.FunctionCallExpression) []string return []string{fmt.Sprintf("%s = Pulumi.%[1]s", pkg)} } +func (g *generator) markTypeAsUsedInFunctionOutputVersionInputs(t model.Type) { + if g.usedInFunctionOutputVersionInputs == nil { + g.usedInFunctionOutputVersionInputs = make(map[schema.Type]bool) + } + schemaType, ok := g.toSchemaType(t) + if !ok { + return + } + g.usedInFunctionOutputVersionInputs[schemaType] = true +} + +func (g *generator) visitToMarkTypesUsedInFunctionOutputVersionInputs(expr model.Expression) { + visitor := func(expr model.Expression) (model.Expression, hcl.Diagnostics) { + isCons, _, t := pcl.RecognizeTypedObjectCons(expr) + if isCons { + g.markTypeAsUsedInFunctionOutputVersionInputs(t) + } + return expr, nil + } + model.VisitExpression(expr, nil, visitor) // nolint:errcheck +} + func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionCallExpression) { switch expr.Name { - 
case hcl2.IntrinsicConvert: + case pcl.IntrinsicConvert: switch arg := expr.Args[0].(type) { case *model.ObjectConsExpression: g.genObjectConsExpression(w, arg, expr.Type()) default: g.Fgenf(w, "%.v", expr.Args[0]) // <- probably wrong w.r.t. precedence } - case hcl2.IntrinsicApply: + case pcl.IntrinsicApply: g.genApply(w, expr) case intrinsicAwait: g.Fgenf(w, "await %.17v", expr.Args[0]) @@ -286,17 +311,34 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(w, "new FileArchive(%.v)", expr.Args[0]) case "fileAsset": g.Fgenf(w, "new FileAsset(%.v)", expr.Args[0]) - case hcl2.Invoke: + case "filebase64": + // Assuming the existence of the following helper method located earlier in the preamble + g.Fgenf(w, "ReadFileBase64(%v)", expr.Args[0]) + case "filebase64sha256": + // Assuming the existence of the following helper method located earlier in the preamble + g.Fgenf(w, "ComputeFileBase64Sha256(%v)", expr.Args[0]) + case pcl.Invoke: _, name := g.functionName(expr.Args[0]) - optionsBag := "" - if len(expr.Args) == 3 { - var buf bytes.Buffer - g.Fgenf(&buf, ", %.v", expr.Args[2]) - optionsBag = buf.String() + isOut, outArgs, outArgsTy := pcl.RecognizeOutputVersionedInvoke(expr) + if isOut { + g.visitToMarkTypesUsedInFunctionOutputVersionInputs(outArgs) + g.Fprintf(w, "%s.Invoke(", name) + typeName := g.argumentTypeNameWithSuffix(expr, outArgsTy, "InvokeArgs") + g.genObjectConsExpressionWithTypeName(w, outArgs, typeName) + } else { + g.Fprintf(w, "%s.InvokeAsync(", name) + if len(expr.Args) >= 2 { + g.Fgenf(w, "%.v", expr.Args[1]) + } } - g.Fgenf(w, "%s.InvokeAsync(%.v%v)", name, expr.Args[1], optionsBag) + if len(expr.Args) == 3 { + g.Fgenf(w, ", %.v", expr.Args[2]) + } + g.Fprint(w, ")") + case "join": + g.Fgenf(w, "string.Join(%v, %v)", expr.Args[0], expr.Args[1]) case "length": g.Fgenf(w, "%.20v.Length", expr.Args[0]) case "lookup": @@ -314,10 +356,15 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr 
*model.FunctionC g.Fgenf(w, "Output.CreateSecret(%v)", expr.Args[0]) case "split": g.Fgenf(w, "%.20v.Split(%v)", expr.Args[1], expr.Args[0]) + case "toBase64": + g.Fgenf(w, "Convert.ToBase64String(System.Text.UTF8.GetBytes(%v))", expr.Args[0]) case "toJSON": g.Fgen(w, "JsonSerializer.Serialize(") g.genDictionary(w, expr.Args[0]) g.Fgen(w, ")") + case "sha1": + // Assuming the existence of the following helper method located earlier in the preamble + g.Fgenf(w, "ComputeSHA1(%v)", expr.Args[0]) default: g.genNYI(w, "call %v", expr.Name) } @@ -399,7 +446,12 @@ func (g *generator) genStringLiteral(w io.Writer, v string) { } func (g *generator) GenLiteralValueExpression(w io.Writer, expr *model.LiteralValueExpression) { - switch expr.Type() { + typ := expr.Type() + if cns, ok := typ.(*model.ConstType); ok { + typ = cns.Type + } + + switch typ { case model.BoolType: g.Fgenf(w, "%v", expr.Value.True()) case model.NoneType: @@ -429,7 +481,18 @@ func (g *generator) genObjectConsExpression(w io.Writer, expr *model.ObjectConsE return } - typeName := g.argumentTypeName(expr, destType) + destTypeName := g.argumentTypeName(expr, destType) + g.genObjectConsExpressionWithTypeName(w, expr, destTypeName) +} + +func (g *generator) genObjectConsExpressionWithTypeName( + w io.Writer, expr *model.ObjectConsExpression, destTypeName string) { + + if len(expr.Items) == 0 { + return + } + + typeName := destTypeName if typeName != "" { g.Fgenf(w, "new %s", typeName) g.Fgenf(w, "\n%s{\n", g.Indent) @@ -504,8 +567,8 @@ func (g *generator) GenScopeTraversalExpression(w io.Writer, expr *model.ScopeTr g.Fgen(w, rootName) var objType *schema.ObjectType - if resource, ok := expr.Parts[0].(*hcl2.Resource); ok { - if schemaType, ok := hcl2.GetSchemaForType(resource.InputType); ok { + if resource, ok := expr.Parts[0].(*pcl.Resource); ok { + if schemaType, ok := pcl.GetSchemaForType(resource.InputType); ok { objType, _ = schemaType.(*schema.ObjectType) } } @@ -520,7 +583,7 @@ func (g *generator) 
GenTemplateExpression(w io.Writer, expr *model.TemplateExpre multiLine := false expressions := false for _, expr := range expr.Parts { - if lit, ok := expr.(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := expr.(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { if strings.Contains(lit.Value.AsString(), "\n") { multiLine = true } @@ -537,7 +600,7 @@ func (g *generator) GenTemplateExpression(w io.Writer, expr *model.TemplateExpre } g.Fgen(w, "\"") for _, expr := range expr.Parts { - if lit, ok := expr.(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := expr.(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { g.Fgen(w, g.escapeString(lit.Value.AsString(), multiLine, expressions)) } else { g.Fgenf(w, "{%.v}", expr) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/importer.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/importer.go index 0375ceb..bf1e3c8 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/importer.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/importer.go @@ -31,6 +31,23 @@ type CSharpPackageInfo struct { Namespaces map[string]string `json:"namespaces,omitempty"` Compatibility string `json:"compatibility,omitempty"` DictionaryConstructors bool `json:"dictionaryConstructors,omitempty"` + ProjectReferences []string `json:"projectReferences,omitempty"` + // Determines whether to make single-return-value methods return an output object or the single value. + LiftSingleValueMethodReturns bool `json:"liftSingleValueMethodReturns,omitempty"` + + // The root namespace used for the package. This defaults to `Pulumi`. + RootNamespace string `json:"rootNamespace,omitempty"` + + // Allow the Pkg.Version field to filter down to emitted code. 
+ RespectSchemaVersion bool `json:"respectSchemaVersion,omitempty"` +} + +// Returns the root namespace, or "Pulumi" if not provided. +func (info *CSharpPackageInfo) GetRootNamespace() string { + if r := info.RootNamespace; r != "" { + return r + } + return "Pulumi" } // Importer implements schema.Language for .NET. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/templates.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/templates.go index da1b2fc..02ffb9e 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/templates.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/templates.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -71,18 +71,20 @@ namespace {{.Namespace}} public static double? GetEnvDouble(params string[] names) => double.TryParse(GetEnv(names), out double v) ? (double?)v : null; + [Obsolete("Please use WithDefaults instead")] public static InvokeOptions WithVersion(this InvokeOptions? options) { - if (options?.Version != null) - { - return options; - } - return new InvokeOptions - { - Parent = options?.Parent, - Provider = options?.Provider, - Version = Version, - }; + InvokeOptions dst = options ?? new InvokeOptions{}; + dst.Version = options?.Version ?? Version; + return dst; + } + + public static InvokeOptions WithDefaults(this InvokeOptions? src) + { + InvokeOptions dst = src ?? new InvokeOptions{}; + dst.Version = src?.Version ?? Version;{{if ne .PluginDownloadURL "" }} + dst.PluginDownloadURL = src?.PluginDownloadURL ?? 
"{{.PluginDownloadURL}}";{{end}} + return dst; } private readonly static string version; @@ -115,10 +117,11 @@ namespace {{.Namespace}} var csharpUtilitiesTemplate = template.Must(template.New("CSharpUtilities").Parse(csharpUtilitiesTemplateText)) type csharpUtilitiesTemplateContext struct { - Name string - Namespace string - ClassName string - Tool string + Name string + Namespace string + ClassName string + Tool string + PluginDownloadURL string } // TODO(pdg): parameterize package name @@ -126,8 +129,8 @@ const csharpProjectFileTemplateText = ` true - Pulumi Corp. - Pulumi Corp. + {{or .Package.Publisher "Pulumi Corp."}} + {{or .Package.Publisher "Pulumi Corp."}} {{.Package.Description}} {{.Package.License}} {{.Package.Homepage}} @@ -136,6 +139,7 @@ const csharpProjectFileTemplateText = ` netcoreapp3.1 enable + false @@ -162,12 +166,23 @@ const csharpProjectFileTemplateText = ` + + + + + {{- range $package, $version := .PackageReferences}} {{- end}} + + {{- range $projdir := .ProjectReferences}} + + {{- end}} + + True @@ -193,5 +208,6 @@ type csharpProjectFileTemplateContext struct { XMLDoc string Package *schema.Package PackageReferences map[string]string + ProjectReferences []string Version string } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/utilities.go index 55fc4a6..af73539 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/dotnet/utilities.go @@ -15,12 +15,12 @@ package dotnet import ( - "github.com/pulumi/pulumi/pkg/v3/codegen" + "fmt" "regexp" "strings" "unicode" - "github.com/pkg/errors" + "github.com/pulumi/pulumi/pkg/v3/codegen" ) // isReservedWord returns true if s is a C# reserved word as per @@ -96,7 +96,7 @@ func makeSafeEnumName(name, typeName string) (string, error) { // If the name is one illegal character, return an error. 
if len(safeName) == 1 && !isLegalIdentifierStart(rune(safeName[0])) { - return "", errors.Errorf("enum name %s is not a valid identifier", safeName) + return "", fmt.Errorf("enum name %s is not a valid identifier", safeName) } // Capitalize and make a valid identifier. @@ -118,3 +118,29 @@ func makeSafeEnumName(name, typeName string) (string, error) { return safeName, nil } + +// Provides code for a method which will be placed in the program preamble if deemed +// necessary. Because many Terraform functions are complex, it is much prettier to +// encapsulate them as their own function in the preamble. +func getHelperMethodIfNeeded(functionName string) (string, bool) { + switch functionName { + case "filebase64": + return `private static string ReadFileBase64(string path) { + return Convert.ToBase64String(Encoding.UTF8.GetBytes(File.ReadAllText(path))) + }`, true + case "filebase64sha256": + return `private static string ComputeFileBase64Sha256(string path) { + var fileData = Encoding.UTF8.GetBytes(File.ReadAllText(path)); + var hashData = SHA256.Create().ComputeHash(fileData); + return Convert.ToBase64String(hashData); + }`, true + case "sha1": + return `private static string ComputeSHA1(string input) { + return BitConverter.ToString( + SHA1.Create().ComputeHash(Encoding.UTF8.GetBytes(input)) + ).Replace("-","").ToLowerInvariant()); + }`, true + default: + return "", false + } +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/doc.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/doc.go index 582e4b5..deb168b 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/doc.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/doc.go @@ -28,6 +28,8 @@ import ( "github.com/pulumi/pulumi/pkg/v3/codegen/schema" ) +const pulumiSDKVersion = "v3" + // DocLanguageHelper is the Go-specific implementation of the DocLanguageHelper. 
type DocLanguageHelper struct { packages map[string]*pkgContext @@ -37,18 +39,21 @@ var _ codegen.DocLanguageHelper = DocLanguageHelper{} // GetDocLinkForPulumiType returns the doc link for a Pulumi type. func (d DocLanguageHelper) GetDocLinkForPulumiType(pkg *schema.Package, typeName string) string { - moduleVersion := "" - if pkg.Version != nil { - if pkg.Version.Major > 1 { - moduleVersion = fmt.Sprintf("v%d/", pkg.Version.Major) + version := pulumiSDKVersion + if info, ok := pkg.Language["go"].(GoPackageInfo); ok { + if info.PulumiSDKVersion == 1 { + return fmt.Sprintf("https://pkg.go.dev/github.com/pulumi/pulumi/sdk/go/pulumi?tab=doc#%s", typeName) + } + if info.PulumiSDKVersion != 0 { + version = fmt.Sprintf("v%d", info.PulumiSDKVersion) } } - return fmt.Sprintf("https://pkg.go.dev/github.com/pulumi/pulumi/sdk/%sgo/pulumi?tab=doc#%s", moduleVersion, typeName) + return fmt.Sprintf("https://pkg.go.dev/github.com/pulumi/pulumi/sdk/%s/go/pulumi?tab=doc#%s", version, typeName) } // GetDocLinkForResourceType returns the godoc URL for a type belonging to a resource provider. func (d DocLanguageHelper) GetDocLinkForResourceType(pkg *schema.Package, moduleName string, typeName string) string { - path := fmt.Sprintf("%s/%s", goPackage(pkg.Name), moduleName) + path := fmt.Sprintf("%s/%s", packageName(pkg), moduleName) typeNameParts := strings.Split(typeName, ".") typeName = typeNameParts[len(typeNameParts)-1] typeName = strings.TrimPrefix(typeName, "*") @@ -82,16 +87,13 @@ func (d DocLanguageHelper) GetDocLinkForFunctionInputOrOutputType(pkg *schema.Pa } // GetLanguageTypeString returns the Go-specific type given a Pulumi schema type. 
-func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, args, optional bool) string { +func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input bool) string { modPkg, ok := d.packages[moduleName] if !ok { glog.Errorf("cannot calculate type string for type %q. could not find a package for module %q", t.String(), moduleName) os.Exit(1) } - if _, ok := t.(*schema.EnumType); ok { - return modPkg.inputType(t, optional) - } - return modPkg.plainType(t, optional) + return modPkg.typeString(t) } // GeneratePackagesMap generates a map of Go packages for resources, functions and types. @@ -133,14 +135,36 @@ func (d DocLanguageHelper) GetResourceFunctionResultName(modName string, f *sche return funcName + "Result" } +func (d DocLanguageHelper) GetMethodName(m *schema.Method) string { + return Title(m.Name) +} + +func (d DocLanguageHelper) GetMethodResultName(pkg *schema.Package, modName string, r *schema.Resource, + m *schema.Method) string { + + if info, ok := pkg.Language["go"].(GoPackageInfo); ok { + if info.LiftSingleValueMethodReturns && m.Function.Outputs != nil && len(m.Function.Outputs.Properties) == 1 { + t := m.Function.Outputs.Properties[0].Type + modPkg, ok := d.packages[modName] + if !ok { + glog.Errorf("cannot calculate type string for type %q. could not find a package for module %q", + t.String(), modName) + os.Exit(1) + } + return modPkg.outputType(t) + } + } + return fmt.Sprintf("%s%sResultOutput", rawResourceName(r), d.GetMethodName(m)) +} + // GetModuleDocLink returns the display name and the link for a module. 
func (d DocLanguageHelper) GetModuleDocLink(pkg *schema.Package, modName string) (string, string) { var displayName string var link string if modName == "" { - displayName = goPackage(pkg.Name) + displayName = packageName(pkg) } else { - displayName = fmt.Sprintf("%s/%s", goPackage(pkg.Name), modName) + displayName = fmt.Sprintf("%s/%s", packageName(pkg), modName) } link = d.GetDocLinkForResourceType(pkg, modName, "") return displayName, link diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen.go index 2d4830e..55d04f4 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -31,17 +31,55 @@ import ( "strings" "unicode" - "github.com/pkg/errors" - "github.com/pulumi/pulumi/pkg/v3/codegen" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" + "github.com/pulumi/pulumi/sdk/v3/go/common/diag" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) type typeDetails struct { - ptrElement bool - arrayElement bool - mapElement bool + // Note: if any of {ptr,array,map}Input are set, input and the corresponding output field must also be set. The + // mark* functions ensure that these invariants hold. + input bool + ptrInput bool + arrayInput bool + mapInput bool + + // Note: if any of {ptr,array,map}Output are set, output must also be set. The mark* functions ensure that these + // invariants hold. 
+ output bool + ptrOutput bool + arrayOutput bool + mapOutput bool +} + +func (d *typeDetails) hasOutputs() bool { + return d.output || d.ptrOutput || d.arrayOutput || d.mapOutput +} + +func (d *typeDetails) mark(input, output bool) { + d.input = d.input || input + d.output = d.output || input || output +} + +func (d *typeDetails) markPtr(input, output bool) { + d.mark(input, output) + d.ptrInput = d.ptrInput || input + d.ptrOutput = d.ptrOutput || input || output +} + +func (d *typeDetails) markArray(input, output bool) { + d.mark(input, output) + d.arrayInput = d.arrayInput || input + d.arrayOutput = d.arrayOutput || input || output +} + +func (d *typeDetails) markMap(input, output bool) { + d.mark(input, output) + d.mapInput = d.mapInput || input + d.mapOutput = d.mapOutput || input || output } // Title converts the input string to a title case @@ -92,21 +130,38 @@ type pkgContext struct { types []*schema.ObjectType resources []*schema.Resource functions []*schema.Function + // schemaNames tracks the names of types/resources as specified in the schema - schemaNames codegen.StringSet - names codegen.StringSet - renamed map[string]string - functionNames map[*schema.Function]string - needsUtils bool - tool string - packages map[string]*pkgContext + schemaNames codegen.StringSet + names codegen.StringSet + renamed map[string]string + + // duplicateTokens tracks tokens that exist for both types and resources + duplicateTokens map[string]bool + functionNames map[*schema.Function]string + needsUtils bool + tool string + packages map[string]*pkgContext // Name overrides set in GoPackageInfo modToPkg map[string]string // Module name -> package name pkgImportAliases map[string]string // Package name -> import alias + + // Determines whether to make single-return-value methods return an output struct or the value + liftSingleValueMethodReturns bool + + // Determines if we should emit type registration code + disableInputTypeRegistrations bool + + // Determines if we should 
emit object defaults code + disableObjectDefaults bool } func (pkg *pkgContext) detailsForType(t schema.Type) *typeDetails { + if obj, ok := t.(*schema.ObjectType); ok && obj.IsInputShape() { + t = obj.PlainShape + } + details, ok := pkg.typeDetails[t] if !ok { details = &typeDetails{} @@ -134,20 +189,59 @@ func (pkg *pkgContext) tokenToType(tok string) string { mod, name := pkg.tokenToPackage(tok), components[2] - modPkg, ok := pkg.packages[mod] name = Title(name) - - if ok { + if modPkg, ok := pkg.packages[mod]; ok { newName, renamed := modPkg.renamed[name] if renamed { name = newName - } else if modPkg.names.Has(name) { - // If the package containing the type's token already has a resource with the - // same name, add a `Type` suffix. - newName = name + "Type" - modPkg.renamed[name] = newName - modPkg.names.Add(newName) + } else if modPkg.duplicateTokens[strings.ToLower(tok)] { + // maintain support for duplicate tokens for types and resources in Kubernetes + name += "Type" + } + } + + if mod == pkg.mod { + return name + } + if mod == "" { + mod = packageRoot(pkg.pkg) + } + + var importPath string + if alias, hasAlias := pkg.pkgImportAliases[path.Join(pkg.importBasePath, mod)]; hasAlias { + importPath = alias + } else { + importPath = strings.ReplaceAll(mod, "/", "") + importPath = strings.ReplaceAll(importPath, "-", "") + } + + return strings.ReplaceAll(importPath+"."+name, "-provider", "") +} + +func (pkg *pkgContext) tokenToEnum(tok string) string { + // token := pkg : module : member + // module := path/to/module + + components := strings.Split(tok, ":") + contract.Assert(len(components) == 3) + if pkg == nil { + panic(fmt.Errorf("pkg is nil. token %s", tok)) + } + if pkg.pkg == nil { + panic(fmt.Errorf("pkg.pkg is nil. 
token %s", tok)) + } + + mod, name := pkg.tokenToPackage(tok), components[2] + + name = Title(name) + if modPkg, ok := pkg.packages[mod]; ok { + newName, renamed := modPkg.renamed[name] + if renamed { name = newName + } else if modPkg.duplicateTokens[tok] { + // If the package containing the enum's token already has a resource or type with the + // same name, add an `Enum` suffix. + name += "Enum" } } @@ -157,8 +251,7 @@ func (pkg *pkgContext) tokenToType(tok string) string { if mod == "" { mod = components[0] } - mod = strings.Replace(mod, "/", "", -1) + "." + name - return strings.Replace(mod, "-provider", "", -1) + return strings.Replace(mod, "/", "", -1) + "." + name } func (pkg *pkgContext) tokenToResource(tok string) string { @@ -189,7 +282,15 @@ func (pkg *pkgContext) tokenToResource(tok string) string { if mod == "" { mod = components[0] } - return strings.Replace(mod, "/", "", -1) + "." + name + + var importPath string + if alias, hasAlias := pkg.pkgImportAliases[path.Join(pkg.importBasePath, mod)]; hasAlias { + importPath = alias + } else { + importPath = strings.ReplaceAll(mod, "/", "") + } + + return importPath + "." + name } func tokenToModule(tok string) string { @@ -207,58 +308,156 @@ func tokenToName(tok string) string { return Title(components[2]) } -func resourceName(r *schema.Resource) string { +// disambiguatedResourceName gets the name of a resource as it should appear in source, resolving conflicts in the process. +func disambiguatedResourceName(r *schema.Resource, pkg *pkgContext) string { + name := rawResourceName(r) + if renamed, ok := pkg.renamed[name]; ok { + name = renamed + } + return name +} + +// rawResourceName produces raw resource name translated from schema type token without resolving conflicts or dupes. 
+func rawResourceName(r *schema.Resource) string { if r.IsProvider { return "Provider" } return tokenToName(r.Token) } -func (pkg *pkgContext) plainType(t schema.Type, optional bool) string { - var typ string +// If `nil` is a valid value of type `t`. +func isNilType(t schema.Type) bool { switch t := t.(type) { + case *schema.OptionalType, *schema.ArrayType, *schema.MapType, *schema.ResourceType, *schema.InputType: + return true + case *schema.TokenType: + // Use the underlying type for now. + if t.UnderlyingType != nil { + return isNilType(t.UnderlyingType) + } + case *schema.UnionType: + // If the union is actually a relaxed enum type, use the underlying + // type for the enum instead + for _, e := range t.ElementTypes { + if typ, ok := e.(*schema.EnumType); ok { + return isNilType(typ.ElementType) + } + } + default: + switch t { + case schema.ArchiveType, schema.AssetType, schema.JSONType, schema.AnyType: + return true + } + } + return false +} + +func (pkg *pkgContext) inputType(t schema.Type) (result string) { + switch t := codegen.SimplifyInputUnion(t).(type) { + case *schema.OptionalType: + return pkg.typeString(t) + case *schema.InputType: + return pkg.inputType(t.ElementType) case *schema.EnumType: - return pkg.plainType(t.ElementType, optional) + // Since enum type is itself an input + return pkg.tokenToEnum(t.Token) + "Input" case *schema.ArrayType: - typ = "[]" - typ += pkg.plainType(t.ElementType, false) - return typ + en := pkg.inputType(t.ElementType) + return strings.TrimSuffix(en, "Input") + "ArrayInput" case *schema.MapType: - typ = "map[string]" - typ += pkg.plainType(t.ElementType, false) - return typ + en := pkg.inputType(t.ElementType) + return strings.TrimSuffix(en, "Input") + "MapInput" case *schema.ObjectType: - typ = pkg.resolveObjectType(t) + if t.IsInputShape() { + t = t.PlainShape + } + return pkg.resolveObjectType(t) + "Input" case *schema.ResourceType: - typ = pkg.resolveResourceType(t) - // Set optional to true because resources are 
pointers. - optional = true + return pkg.resolveResourceType(t) + "Input" case *schema.TokenType: // Use the underlying type for now. if t.UnderlyingType != nil { - return pkg.plainType(t.UnderlyingType, optional) + return pkg.inputType(t.UnderlyingType) } - typ = pkg.tokenToType(t.Token) + return pkg.tokenToType(t.Token) + "Input" case *schema.UnionType: // If the union is actually a relaxed enum type, use the underlying - // type for the enum instead + // type for the input instead for _, e := range t.ElementTypes { if typ, ok := e.(*schema.EnumType); ok { - return pkg.plainType(typ.ElementType, optional) + return pkg.inputType(typ.ElementType) } } // TODO(pdg): union types - return "interface{}" + return "pulumi.Input" + default: + switch t { + case schema.BoolType: + return "pulumi.BoolInput" + case schema.IntType: + return "pulumi.IntInput" + case schema.NumberType: + return "pulumi.Float64Input" + case schema.StringType: + return "pulumi.StringInput" + case schema.ArchiveType: + return "pulumi.ArchiveInput" + case schema.AssetType: + return "pulumi.AssetOrArchiveInput" + case schema.JSONType: + fallthrough + case schema.AnyType: + return "pulumi.Input" + } + } + + panic(fmt.Errorf("unexpected type %T", t)) +} + +func (pkg *pkgContext) argsTypeImpl(t schema.Type) (result string) { + switch t := codegen.SimplifyInputUnion(t).(type) { + case *schema.OptionalType: + return pkg.typeStringImpl(t, true) + case *schema.InputType: + return pkg.argsTypeImpl(t.ElementType) + case *schema.EnumType: + // Since enum type is itself an input + return pkg.tokenToEnum(t.Token) + case *schema.ArrayType: + en := pkg.argsTypeImpl(t.ElementType) + return strings.TrimSuffix(en, "Args") + "Array" + case *schema.MapType: + en := pkg.argsTypeImpl(t.ElementType) + return strings.TrimSuffix(en, "Args") + "Map" + case *schema.ObjectType: + return pkg.resolveObjectType(t) + case *schema.ResourceType: + return pkg.resolveResourceType(t) + case *schema.TokenType: + // Use the underlying 
type for now. + if t.UnderlyingType != nil { + return pkg.argsTypeImpl(t.UnderlyingType) + } + return pkg.tokenToType(t.Token) + case *schema.UnionType: + // If the union is actually a relaxed enum type, use the underlying + // type for the input instead + for _, e := range t.ElementTypes { + if typ, ok := e.(*schema.EnumType); ok { + return pkg.argsTypeImpl(typ.ElementType) + } + } + return "pulumi.Any" default: switch t { case schema.BoolType: - typ = "bool" + return "pulumi.Bool" case schema.IntType: - typ = "int" + return "pulumi.Int" case schema.NumberType: - typ = "float64" + return "pulumi.Float64" case schema.StringType: - typ = "string" + return "pulumi.String" case schema.ArchiveType: return "pulumi.Archive" case schema.AssetType: @@ -266,88 +465,123 @@ func (pkg *pkgContext) plainType(t schema.Type, optional bool) string { case schema.JSONType: fallthrough case schema.AnyType: - return "interface{}" + return "pulumi.Any" } } - if optional { - return "*" + typ - } - return typ + panic(fmt.Errorf("unexpected type %T", t)) +} + +func (pkg *pkgContext) argsType(t schema.Type) string { + return pkg.typeStringImpl(t, true) } -func (pkg *pkgContext) inputType(t schema.Type, optional bool) string { - var typ string +func (pkg *pkgContext) typeStringImpl(t schema.Type, argsType bool) string { switch t := t.(type) { - case *schema.EnumType: - var prefix string - if optional { - prefix = "*" + case *schema.OptionalType: + if input, isInputType := t.ElementType.(*schema.InputType); isInputType { + elem := pkg.inputType(input.ElementType) + if isNilType(input.ElementType) || elem == "pulumi.Input" { + return elem + } + if argsType { + return elem + "Ptr" + } + return strings.TrimSuffix(elem, "Input") + "PtrInput" } - // Since enum type is itself an input - return prefix + pkg.tokenToEnum(t.Token) + + elementType := pkg.typeStringImpl(t.ElementType, argsType) + if isNilType(t.ElementType) || elementType == "interface{}" { + return elementType + } + return "*" + 
elementType + case *schema.InputType: + if argsType { + return pkg.argsTypeImpl(t.ElementType) + } + return pkg.inputType(t.ElementType) + case *schema.EnumType: + return pkg.tokenToEnum(t.Token) case *schema.ArrayType: - en := pkg.inputType(t.ElementType, false) - return strings.TrimSuffix(en, "Input") + "ArrayInput" + typ := "[]" + return typ + pkg.typeStringImpl(t.ElementType, argsType) case *schema.MapType: - en := pkg.inputType(t.ElementType, false) - return strings.TrimSuffix(en, "Input") + "MapInput" + typ := "map[string]" + return typ + pkg.typeStringImpl(t.ElementType, argsType) case *schema.ObjectType: - typ = pkg.resolveObjectType(t) + return pkg.resolveObjectType(t) case *schema.ResourceType: - typ = pkg.resolveResourceType(t) - return typ + "Input" + return "*" + pkg.resolveResourceType(t) case *schema.TokenType: // Use the underlying type for now. if t.UnderlyingType != nil { - return pkg.inputType(t.UnderlyingType, optional) + return pkg.typeStringImpl(t.UnderlyingType, argsType) } - typ = pkg.tokenToType(t.Token) + return pkg.tokenToType(t.Token) case *schema.UnionType: // If the union is actually a relaxed enum type, use the underlying - // type for the input instead + // type for the enum instead for _, e := range t.ElementTypes { if typ, ok := e.(*schema.EnumType); ok { - return pkg.inputType(typ.ElementType, optional) + return pkg.typeStringImpl(typ.ElementType, argsType) } } // TODO(pdg): union types - return "pulumi.Input" + return "interface{}" default: switch t { case schema.BoolType: - typ = "pulumi.Bool" + return "bool" case schema.IntType: - typ = "pulumi.Int" + return "int" case schema.NumberType: - typ = "pulumi.Float64" + return "float64" case schema.StringType: - typ = "pulumi.String" + return "string" case schema.ArchiveType: - return "pulumi.ArchiveInput" + return "pulumi.Archive" case schema.AssetType: - return "pulumi.AssetOrArchiveInput" + return "pulumi.AssetOrArchive" case schema.JSONType: fallthrough case schema.AnyType: - 
return "pulumi.Input" + return "interface{}" } } - if optional { - return typ + "PtrInput" + panic(fmt.Errorf("unexpected type %T", t)) +} + +func (pkg *pkgContext) typeString(t schema.Type) string { + s := pkg.typeStringImpl(t, false) + if s == "pulumi." { + return "pulumi.Any" } - return typ + "Input" + return s + } func (pkg *pkgContext) isExternalReference(t schema.Type) bool { + isExternal, _ := pkg.isExternalReferenceWithPackage(t) + return isExternal +} + +func (pkg *pkgContext) isExternalReferenceWithPackage(t schema.Type) (isExternal bool, extPkg *schema.Package) { switch typ := t.(type) { case *schema.ObjectType: - return typ.Package != nil && pkg.pkg != nil && typ.Package != pkg.pkg + isExternal = typ.Package != nil && pkg.pkg != nil && typ.Package != pkg.pkg + if isExternal { + extPkg = typ.Package + } + return case *schema.ResourceType: - return typ.Resource != nil && pkg.pkg != nil && typ.Resource.Package != pkg.pkg + isExternal = typ.Resource != nil && pkg.pkg != nil && typ.Resource.Package != pkg.pkg + if isExternal { + extPkg = typ.Resource.Package + } + return } - return false + return } // resolveResourceType resolves resource references in properties while @@ -358,22 +592,10 @@ func (pkg *pkgContext) resolveResourceType(t *schema.ResourceType) string { if !pkg.isExternalReference(t) { return pkg.tokenToResource(t.Token) } - extPkg := t.Resource.Package - var goInfo GoPackageInfo - - contract.AssertNoError(extPkg.ImportLanguages(map[string]schema.Language{"go": Importer})) - if info, ok := extPkg.Language["go"].(GoPackageInfo); ok { - goInfo = info - } - extPkgCtx := &pkgContext{ - pkg: extPkg, - importBasePath: goInfo.ImportBasePath, - pkgImportAliases: goInfo.PackageImportAliases, - modToPkg: goInfo.ModuleToPackage, - } + extPkgCtx := pkg.contextForExternalReference(t) resType := extPkgCtx.tokenToResource(t.Token) if !strings.Contains(resType, ".") { - resType = fmt.Sprintf("%s.%s", extPkg.Name, resType) + resType = fmt.Sprintf("%s.%s", 
extPkgCtx.pkg.Name, resType) } return resType } @@ -384,72 +606,113 @@ func (pkg *pkgContext) resolveResourceType(t *schema.ResourceType) string { // optional and convert the type to a pointer if necessary. func (pkg *pkgContext) resolveObjectType(t *schema.ObjectType) string { if !pkg.isExternalReference(t) { - return pkg.tokenToType(t.Token) + name := pkg.tokenToType(t.Token) + if t.IsInputShape() { + return name + "Args" + } + return name } - extPkg := t.Package - var goInfo GoPackageInfo + return pkg.contextForExternalReference(t).typeString(t) +} +func (pkg *pkgContext) contextForExternalReference(t schema.Type) *pkgContext { + isExternal, extPkg := pkg.isExternalReferenceWithPackage(t) + contract.Assert(isExternal) + + var goInfo GoPackageInfo contract.AssertNoError(extPkg.ImportLanguages(map[string]schema.Language{"go": Importer})) if info, ok := extPkg.Language["go"].(GoPackageInfo); ok { goInfo = info + } else { + goInfo.ImportBasePath = extractImportBasePath(extPkg) + } + + pkgImportAliases := goInfo.PackageImportAliases + + // Ensure that any package import aliases we have specified locally take precedence over those + // specified in the remote package. + if ourPkgGoInfoI, has := pkg.pkg.Language["go"]; has { + ourPkgGoInfo := ourPkgGoInfoI.(GoPackageInfo) + if len(ourPkgGoInfo.PackageImportAliases) > 0 { + pkgImportAliases = make(map[string]string) + // Copy the external import aliases. + for k, v := range goInfo.PackageImportAliases { + pkgImportAliases[k] = v + } + // Copy the local import aliases, overwriting any external aliases. 
+ for k, v := range ourPkgGoInfo.PackageImportAliases { + pkgImportAliases[k] = v + } + } } + extPkgCtx := &pkgContext{ pkg: extPkg, importBasePath: goInfo.ImportBasePath, - pkgImportAliases: goInfo.PackageImportAliases, + pkgImportAliases: pkgImportAliases, modToPkg: goInfo.ModuleToPackage, } - return extPkgCtx.plainType(t, false) + return extPkgCtx } -func (pkg *pkgContext) outputType(t schema.Type, optional bool) string { - var typ string +// outputTypeImpl does the meat of the generation of output type names from schema types. This function should only be +// called with a fully-resolved type (e.g. the result of codegen.ResolvedType). Instead of calling this function, you +// probably want to call pkgContext.outputType, which ensures that its argument is resolved. +func (pkg *pkgContext) outputTypeImpl(t schema.Type) string { switch t := t.(type) { + case *schema.OptionalType: + elem := pkg.outputTypeImpl(t.ElementType) + if isNilType(t.ElementType) || elem == "pulumi.AnyOutput" { + return elem + } + return strings.TrimSuffix(elem, "Output") + "PtrOutput" case *schema.EnumType: - return pkg.outputType(t.ElementType, optional) + return pkg.tokenToEnum(t.Token) + "Output" case *schema.ArrayType: - en := strings.TrimSuffix(pkg.outputType(t.ElementType, false), "Output") + en := strings.TrimSuffix(pkg.outputTypeImpl(t.ElementType), "Output") if en == "pulumi.Any" { return "pulumi.ArrayOutput" } return en + "ArrayOutput" case *schema.MapType: - en := strings.TrimSuffix(pkg.outputType(t.ElementType, false), "Output") + en := strings.TrimSuffix(pkg.outputTypeImpl(t.ElementType), "Output") if en == "pulumi.Any" { return "pulumi.MapOutput" } return en + "MapOutput" case *schema.ObjectType: - typ = pkg.resolveObjectType(t) + return pkg.resolveObjectType(t) + "Output" case *schema.ResourceType: - typ = pkg.resolveResourceType(t) - return typ + "Output" + return pkg.resolveResourceType(t) + "Output" case *schema.TokenType: // Use the underlying type for now. 
if t.UnderlyingType != nil { - return pkg.outputType(t.UnderlyingType, optional) + return pkg.outputTypeImpl(t.UnderlyingType) } - typ = pkg.tokenToType(t.Token) + return pkg.tokenToType(t.Token) + "Output" case *schema.UnionType: // If the union is actually a relaxed enum type, use the underlying // type for the output instead for _, e := range t.ElementTypes { if typ, ok := e.(*schema.EnumType); ok { - return pkg.outputType(typ.ElementType, optional) + return pkg.outputTypeImpl(typ.ElementType) } } // TODO(pdg): union types return "pulumi.AnyOutput" + case *schema.InputType: + // We can't make output types for input types. We instead strip the input and try again. + return pkg.outputTypeImpl(t.ElementType) default: switch t { case schema.BoolType: - typ = "pulumi.Bool" + return "pulumi.BoolOutput" case schema.IntType: - typ = "pulumi.Int" + return "pulumi.IntOutput" case schema.NumberType: - typ = "pulumi.Float64" + return "pulumi.Float64Output" case schema.StringType: - typ = "pulumi.String" + return "pulumi.StringOutput" case schema.ArchiveType: return "pulumi.ArchiveOutput" case schema.AssetType: @@ -461,10 +724,26 @@ func (pkg *pkgContext) outputType(t schema.Type, optional bool) string { } } - if optional { - return typ + "PtrOutput" + panic(fmt.Errorf("unexpected type %T", t)) +} + +// outputType returns a reference to the Go output type that corresponds to the given schema type. For example, given +// a schema.String, outputType returns "pulumi.String", and given a *schema.ObjectType with the token pkg:mod:Name, +// outputType returns "mod.NameOutput" or "NameOutput", depending on whether or not the object type lives in a +// different module than the one associated with the receiver. +func (pkg *pkgContext) outputType(t schema.Type) string { + return pkg.outputTypeImpl(codegen.ResolvedType(t)) +} + +// toOutputMethod returns the name of the "ToXXXOutput" method for the given schema type. 
For example, given a +// schema.String, toOutputMethod returns "ToStringOutput", and given a *schema.ObjectType with the token pkg:mod:Name, +// outputType returns "ToNameOutput". +func (pkg *pkgContext) toOutputMethod(t schema.Type) string { + outputTypeName := pkg.outputType(t) + if i := strings.LastIndexByte(outputTypeName, '.'); i != -1 { + outputTypeName = outputTypeName[i+1:] } - return typ + "Output" + return "To" + outputTypeName } func printComment(w io.Writer, comment string, indent bool) int { @@ -588,45 +867,44 @@ func (pkg *pkgContext) getInputUsage(name string) string { }, "\n") } -// genResourceContainerInput handles generating container (slice/map) wrappers around -// resources to facilitate external references. -func genResourceContainerInput(w io.Writer, name, receiverType, elementType string) { - fmt.Fprintf(w, "func (%s) ElementType() reflect.Type {\n", receiverType) - fmt.Fprintf(w, "\treturn reflect.TypeOf((%s)(nil))\n", elementType) - fmt.Fprintf(w, "}\n\n") - - fmt.Fprintf(w, "func (i %s) To%sOutput() %sOutput {\n", receiverType, Title(name), name) - fmt.Fprintf(w, "\treturn i.To%sOutputWithContext(context.Background())\n", Title(name)) - fmt.Fprintf(w, "}\n\n") +type genInputImplementationArgs struct { + name string + receiverType string + elementType string + ptrMethods bool + toOutputMethods bool +} - fmt.Fprintf(w, "func (i %s) To%sOutputWithContext(ctx context.Context) %sOutput {\n", receiverType, Title(name), name) - if strings.HasSuffix(name, "Ptr") { - base := name[:len(name)-3] - fmt.Fprintf(w, "\treturn pulumi.ToOutputWithContext(ctx, i).(%sOutput).To%sOutput()\n", base, Title(name)) - } else { - fmt.Fprintf(w, "\treturn pulumi.ToOutputWithContext(ctx, i).(%sOutput)\n", name) - } - fmt.Fprintf(w, "}\n\n") +func genInputImplementation(w io.Writer, name, receiverType, elementType string, ptrMethods bool) { + genInputImplementationWithArgs(w, genInputImplementationArgs{ + name: name, + receiverType: receiverType, + elementType: 
elementType, + ptrMethods: ptrMethods, + toOutputMethods: true, + }) } -func genInputMethods(w io.Writer, name, receiverType, elementType string, ptrMethods, resourceType bool) { +func genInputImplementationWithArgs(w io.Writer, genArgs genInputImplementationArgs) { + name := genArgs.name + receiverType := genArgs.receiverType + elementType := genArgs.elementType + fmt.Fprintf(w, "func (%s) ElementType() reflect.Type {\n", receiverType) - if resourceType { - fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s)(nil))\n", elementType) - } else { - fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s)(nil)).Elem()\n", elementType) - } + fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s)(nil)).Elem()\n", elementType) fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "func (i %s) To%sOutput() %sOutput {\n", receiverType, Title(name), name) - fmt.Fprintf(w, "\treturn i.To%sOutputWithContext(context.Background())\n", Title(name)) - fmt.Fprintf(w, "}\n\n") + if genArgs.toOutputMethods { + fmt.Fprintf(w, "func (i %s) To%sOutput() %sOutput {\n", receiverType, Title(name), name) + fmt.Fprintf(w, "\treturn i.To%sOutputWithContext(context.Background())\n", Title(name)) + fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "func (i %s) To%sOutputWithContext(ctx context.Context) %sOutput {\n", receiverType, Title(name), name) - fmt.Fprintf(w, "\treturn pulumi.ToOutputWithContext(ctx, i).(%sOutput)\n", name) - fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "func (i %s) To%sOutputWithContext(ctx context.Context) %sOutput {\n", receiverType, Title(name), name) + fmt.Fprintf(w, "\treturn pulumi.ToOutputWithContext(ctx, i).(%sOutput)\n", name) + fmt.Fprintf(w, "}\n\n") + } - if ptrMethods { + if genArgs.ptrMethods { fmt.Fprintf(w, "func (i %s) To%sPtrOutput() %sPtrOutput {\n", receiverType, Title(name), name) fmt.Fprintf(w, "\treturn i.To%sPtrOutputWithContext(context.Background())\n", Title(name)) fmt.Fprintf(w, "}\n\n") @@ -641,17 +919,82 @@ func genInputMethods(w io.Writer, name, receiverType, elementType string, ptrMet } } 
-func (pkg *pkgContext) genEnum(w io.Writer, enum *schema.EnumType) error { - return pkg.genEnumType(w, pkg.tokenToEnum(enum.Token), enum) +func genOutputType(w io.Writer, baseName, elementType string, ptrMethods bool) { + fmt.Fprintf(w, "type %sOutput struct { *pulumi.OutputState }\n\n", baseName) + + fmt.Fprintf(w, "func (%sOutput) ElementType() reflect.Type {\n", baseName) + fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s)(nil)).Elem()\n", elementType) + fmt.Fprintf(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sOutput() %[1]sOutput {\n", baseName, Title(baseName)) + fmt.Fprintf(w, "\treturn o\n") + fmt.Fprintf(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sOutputWithContext(ctx context.Context) %[1]sOutput {\n", baseName, Title(baseName)) + fmt.Fprintf(w, "\treturn o\n") + fmt.Fprintf(w, "}\n\n") + + if ptrMethods { + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutput() %[1]sPtrOutput {\n", baseName, Title(baseName)) + fmt.Fprintf(w, "\treturn o.To%sPtrOutputWithContext(context.Background())\n", Title(baseName)) + fmt.Fprintf(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutputWithContext(ctx context.Context) %[1]sPtrOutput {\n", baseName, Title(baseName)) + fmt.Fprintf(w, "\treturn o.ApplyTWithContext(ctx, func(_ context.Context, v %[1]s) *%[1]s {\n", elementType) + fmt.Fprintf(w, "\t\treturn &v\n") + fmt.Fprintf(w, "\t}).(%sPtrOutput)\n", baseName) + fmt.Fprintf(w, "}\n\n") + } +} + +func genArrayOutput(w io.Writer, baseName, elementType string) { + genOutputType(w, baseName+"Array", "[]"+elementType, false) + + fmt.Fprintf(w, "func (o %[1]sArrayOutput) Index(i pulumi.IntInput) %[1]sOutput {\n", baseName) + fmt.Fprintf(w, "\treturn pulumi.All(o, i).ApplyT(func (vs []interface{}) %s {\n", elementType) + fmt.Fprintf(w, "\t\treturn vs[0].([]%s)[vs[1].(int)]\n", elementType) + fmt.Fprintf(w, "\t}).(%sOutput)\n", baseName) + fmt.Fprintf(w, "}\n\n") +} + +func genMapOutput(w io.Writer, baseName, elementType string) { + 
genOutputType(w, baseName+"Map", "map[string]"+elementType, false) + + fmt.Fprintf(w, "func (o %[1]sMapOutput) MapIndex(k pulumi.StringInput) %[1]sOutput {\n", baseName) + fmt.Fprintf(w, "\treturn pulumi.All(o, k).ApplyT(func (vs []interface{}) %s{\n", elementType) + fmt.Fprintf(w, "\t\treturn vs[0].(map[string]%s)[vs[1].(string)]\n", elementType) + fmt.Fprintf(w, "\t}).(%sOutput)\n", baseName) + fmt.Fprintf(w, "}\n\n") +} + +func genPtrOutput(w io.Writer, baseName, elementType string) { + genOutputType(w, baseName+"Ptr", "*"+elementType, false) + + fmt.Fprintf(w, "func (o %[1]sPtrOutput) Elem() %[1]sOutput {\n", baseName) + fmt.Fprintf(w, "\treturn o.ApplyT(func(v *%[1]s) %[1]s {\n", baseName) + fmt.Fprint(w, "\t\tif v != nil {\n") + fmt.Fprintf(w, "\t\t\treturn *v\n") + fmt.Fprint(w, "\t\t}\n") + fmt.Fprintf(w, "\t\tvar ret %s\n", baseName) + fmt.Fprint(w, "\t\treturn ret\n") + fmt.Fprintf(w, "\t}).(%sOutput)\n", baseName) + fmt.Fprint(w, "}\n\n") } -func (pkg *pkgContext) genEnumType(w io.Writer, name string, enumType *schema.EnumType) error { +func (pkg *pkgContext) genEnum(w io.Writer, enumType *schema.EnumType) error { + name := pkg.tokenToEnum(enumType.Token) + mod := pkg.tokenToPackage(enumType.Token) modPkg, ok := pkg.packages[mod] contract.Assert(ok) + printCommentWithDeprecationMessage(w, enumType.Comment, "", false) - elementType := pkg.enumElementType(enumType.ElementType, false) - fmt.Fprintf(w, "type %s %s\n\n", name, elementType) + + elementArgsType := pkg.argsTypeImpl(enumType.ElementType) + elementGoType := pkg.typeString(enumType.ElementType) + asFuncName := strings.TrimPrefix(elementArgsType, "pulumi.") + + fmt.Fprintf(w, "type %s %s\n\n", name, elementGoType) fmt.Fprintln(w, "const (") for _, e := range enumType.Elements { @@ -677,104 +1020,195 @@ func (pkg *pkgContext) genEnumType(w io.Writer, name string, enumType *schema.En } } fmt.Fprintln(w, ")") - inputType := pkg.inputType(enumType, false) - contract.Assertf(name == inputType, - "expect 
inputType (%s) for enums to be the same as enum type (%s)", inputType, enumType) - pkg.genEnumInputFuncs(w, name, enumType, elementType, inputType) - return nil -} -func (pkg *pkgContext) enumElementType(t schema.Type, optional bool) string { - suffix := "" - if optional { - suffix = "Ptr" - } - switch t { - case schema.BoolType: - return "pulumi.Bool" + suffix - case schema.IntType: - return "pulumi.Int" + suffix - case schema.NumberType: - return "pulumi.Float64" + suffix - case schema.StringType: - return "pulumi.String" + suffix - default: - // We only expect to support the above element types for enums - panic(fmt.Sprintf("Invalid enum type: %s", t)) + details := pkg.detailsForType(enumType) + if details.input || details.ptrInput { + inputType := pkg.inputType(enumType) + pkg.genEnumInputFuncs(w, name, enumType, elementArgsType, inputType, asFuncName) } -} -func (pkg *pkgContext) genEnumInputFuncs(w io.Writer, typeName string, enum *schema.EnumType, elementType, inputType string) { - fmt.Fprintln(w) - asFuncName := Title(strings.Replace(elementType, "pulumi.", "", -1)) - fmt.Fprintf(w, "func (%s) ElementType() reflect.Type {\n", typeName) - fmt.Fprintf(w, "return reflect.TypeOf((*%s)(nil)).Elem()\n", elementType) - fmt.Fprintln(w, "}") - fmt.Fprintln(w) + if details.output || details.ptrOutput { + pkg.genEnumOutputTypes(w, name, elementArgsType, elementGoType, asFuncName) + } + if details.input || details.ptrInput { + pkg.genEnumInputTypes(w, name, enumType, elementGoType) + } - fmt.Fprintf(w, "func (e %s) To%sOutput() %sOutput {\n", typeName, asFuncName, elementType) - fmt.Fprintf(w, "return pulumi.ToOutput(%[1]s(e)).(%[1]sOutput)\n", elementType) - fmt.Fprintln(w, "}") - fmt.Fprintln(w) + // Generate the array input. 
+ if details.arrayInput { + pkg.genInputInterface(w, name+"Array") - fmt.Fprintf(w, "func (e %[1]s) To%[2]sOutputWithContext(ctx context.Context) %[3]sOutput {\n", typeName, asFuncName, elementType) - fmt.Fprintf(w, "return pulumi.ToOutputWithContext(ctx, %[1]s(e)).(%[1]sOutput)\n", elementType) - fmt.Fprintln(w, "}") - fmt.Fprintln(w) + fmt.Fprintf(w, "type %[1]sArray []%[1]s\n\n", name) - fmt.Fprintf(w, "func (e %[1]s) To%[2]sPtrOutput() %[3]sPtrOutput {\n", typeName, asFuncName, elementType) - fmt.Fprintf(w, "return %[1]s(e).To%[2]sPtrOutputWithContext(context.Background())\n", elementType, asFuncName) - fmt.Fprintln(w, "}") + genInputImplementation(w, name+"Array", name+"Array", "[]"+name, false) + } + + // Generate the map input. + if details.mapInput { + pkg.genInputInterface(w, name+"Map") + + fmt.Fprintf(w, "type %[1]sMap map[string]%[1]s\n\n", name) + + genInputImplementation(w, name+"Map", name+"Map", "map[string]"+name, false) + } + + // Generate the array output + if details.arrayOutput { + genArrayOutput(w, name, name) + } + + // Generate the map output. 
+ if details.mapOutput { + genMapOutput(w, name, name) + } + + return nil +} + +func (pkg *pkgContext) genEnumOutputTypes(w io.Writer, name, elementArgsType, elementGoType, asFuncName string) { + genOutputType(w, name, name, true) + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sOutput() %[3]sOutput {\n", name, asFuncName, elementArgsType) + fmt.Fprintf(w, "return o.To%sOutputWithContext(context.Background())\n", asFuncName) + fmt.Fprint(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sOutputWithContext(ctx context.Context) %[3]sOutput {\n", name, asFuncName, elementArgsType) + fmt.Fprintf(w, "return o.ApplyTWithContext(ctx, func(_ context.Context, e %s) %s {\n", name, elementGoType) + fmt.Fprintf(w, "return %s(e)\n", elementGoType) + fmt.Fprintf(w, "}).(%sOutput)\n", elementArgsType) + fmt.Fprint(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutput() %[3]sPtrOutput {\n", name, asFuncName, elementArgsType) + fmt.Fprintf(w, "return o.To%sPtrOutputWithContext(context.Background())\n", asFuncName) + fmt.Fprint(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutputWithContext(ctx context.Context) %[3]sPtrOutput {\n", name, asFuncName, elementArgsType) + fmt.Fprintf(w, "return o.ApplyTWithContext(ctx, func(_ context.Context, e %s) *%s {\n", name, elementGoType) + fmt.Fprintf(w, "v := %s(e)\n", elementGoType) + fmt.Fprintf(w, "return &v\n") + fmt.Fprintf(w, "}).(%sPtrOutput)\n", elementArgsType) + fmt.Fprint(w, "}\n\n") + + genPtrOutput(w, name, name) + + fmt.Fprintf(w, "func (o %[1]sPtrOutput) To%[2]sPtrOutput() %[3]sPtrOutput {\n", name, asFuncName, elementArgsType) + fmt.Fprintf(w, "return o.To%sPtrOutputWithContext(context.Background())\n", asFuncName) + fmt.Fprint(w, "}\n\n") + + fmt.Fprintf(w, "func (o %[1]sPtrOutput) To%[2]sPtrOutputWithContext(ctx context.Context) %[3]sPtrOutput {\n", name, asFuncName, elementArgsType) + fmt.Fprintf(w, "return o.ApplyTWithContext(ctx, func(_ context.Context, e *%s) *%s {\n", name, 
elementGoType) + fmt.Fprintf(w, "if e == nil {\n") + fmt.Fprintf(w, "return nil\n") + fmt.Fprintf(w, "}\n") + fmt.Fprintf(w, "v := %s(*e)\n", elementGoType) + fmt.Fprintf(w, "return &v\n") + fmt.Fprintf(w, "}).(%sPtrOutput)\n", elementArgsType) + fmt.Fprint(w, "}\n\n") +} + +func (pkg *pkgContext) genEnumInputTypes(w io.Writer, name string, enumType *schema.EnumType, elementGoType string) { + pkg.genInputInterface(w, name) + + fmt.Fprintf(w, "var %sPtrType = reflect.TypeOf((**%s)(nil)).Elem()\n", camel(name), name) fmt.Fprintln(w) - fmt.Fprintf(w, "func (e %[1]s) To%[2]sPtrOutputWithContext(ctx context.Context) %[3]sPtrOutput {\n", typeName, asFuncName, elementType) - fmt.Fprintf(w, "return %[1]s(e).To%[2]sOutputWithContext(ctx).To%[2]sPtrOutputWithContext(ctx)\n", elementType, asFuncName) - fmt.Fprintln(w, "}") + fmt.Fprintf(w, "type %sPtrInput interface {\n", name) + fmt.Fprint(w, "pulumi.Input\n\n") + fmt.Fprintf(w, "To%[1]sPtrOutput() %[1]sPtrOutput\n", name) + fmt.Fprintf(w, "To%[1]sPtrOutputWithContext(context.Context) %[1]sPtrOutput\n", name) + fmt.Fprintf(w, "}\n") fmt.Fprintln(w) - details := pkg.detailsForType(enum) - // Generate the array input. - if details.arrayElement { - pkg.genInputInterface(w, typeName+"Array") + fmt.Fprintf(w, "type %sPtr %s\n", camel(name), elementGoType) + fmt.Fprintln(w) - fmt.Fprintf(w, "type %[1]sArray []%[1]s\n\n", typeName) + fmt.Fprintf(w, "func %[1]sPtr(v %[2]s) %[1]sPtrInput {\n", name, elementGoType) + fmt.Fprintf(w, "return (*%sPtr)(&v)\n", camel(name)) + fmt.Fprintf(w, "}\n") + fmt.Fprintln(w) - genInputMethods(w, typeName+"Array", typeName+"Array", "[]"+typeName, false, false) - } + fmt.Fprintf(w, "func (*%sPtr) ElementType() reflect.Type {\n", camel(name)) + fmt.Fprintf(w, "return %sPtrType\n", camel(name)) + fmt.Fprintf(w, "}\n") + fmt.Fprintln(w) - // Generate the map input. 
- if details.mapElement { - pkg.genInputInterface(w, typeName+"Map") + fmt.Fprintf(w, "func (in *%[1]sPtr) To%[2]sPtrOutput() %[2]sPtrOutput {\n", camel(name), name) + fmt.Fprintf(w, "return pulumi.ToOutput(in).(%sPtrOutput)\n", name) + fmt.Fprintf(w, "}\n") + fmt.Fprintln(w) - fmt.Fprintf(w, "type %[1]sMap map[string]%[1]s\n\n", typeName) + fmt.Fprintf(w, "func (in *%[1]sPtr) To%[2]sPtrOutputWithContext(ctx context.Context) %[2]sPtrOutput {\n", camel(name), name) + fmt.Fprintf(w, "return pulumi.ToOutputWithContext(ctx, in).(%sPtrOutput)\n", name) + fmt.Fprintf(w, "}\n") + fmt.Fprintln(w) +} - genInputMethods(w, typeName+"Map", typeName+"Map", "map[string]"+typeName, false, false) - } +func (pkg *pkgContext) genEnumInputFuncs(w io.Writer, typeName string, enum *schema.EnumType, elementArgsType, inputType, asFuncName string) { + fmt.Fprintln(w) + fmt.Fprintf(w, "func (%s) ElementType() reflect.Type {\n", typeName) + fmt.Fprintf(w, "return reflect.TypeOf((*%s)(nil)).Elem()\n", typeName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) - // Generate the array output - if details.arrayElement { - fmt.Fprintf(w, "type %sArrayOutput struct { *pulumi.OutputState }\n\n", typeName) + fmt.Fprintf(w, "func (e %[1]s) To%[1]sOutput() %[1]sOutput {\n", typeName) + fmt.Fprintf(w, "return pulumi.ToOutput(e).(%sOutput)\n", typeName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) - genOutputMethods(w, typeName+"Array", "[]"+typeName, false) + fmt.Fprintf(w, "func (e %[1]s) To%[1]sOutputWithContext(ctx context.Context) %[1]sOutput {\n", typeName) + fmt.Fprintf(w, "return pulumi.ToOutputWithContext(ctx, e).(%sOutput)\n", typeName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) - fmt.Fprintf(w, "func (o %[1]sArrayOutput) Index(i pulumi.IntInput) %[2]sOutput {\n", typeName, elementType) - fmt.Fprintf(w, "\treturn pulumi.All(o, i).ApplyT(func (vs []interface{}) %sOutput {\n", elementType) - fmt.Fprintf(w, "\t\treturn vs[0].([]%s)[vs[1].(int)].To%sOutput()\n", typeName, asFuncName) - fmt.Fprintf(w, 
"\t}).(%sOutput)\n", elementType) - fmt.Fprintf(w, "}\n\n") - } + fmt.Fprintf(w, "func (e %[1]s) To%[1]sPtrOutput() %[1]sPtrOutput {\n", typeName) + fmt.Fprintf(w, "return e.To%sPtrOutputWithContext(context.Background())\n", typeName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) - // Generate the map output. - if details.mapElement { - fmt.Fprintf(w, "type %sMapOutput struct { *pulumi.OutputState }\n\n", typeName) + fmt.Fprintf(w, "func (e %[1]s) To%[1]sPtrOutputWithContext(ctx context.Context) %[1]sPtrOutput {\n", typeName) + fmt.Fprintf(w, "return %[1]s(e).To%[1]sOutputWithContext(ctx).To%[1]sPtrOutputWithContext(ctx)\n", typeName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) - genOutputMethods(w, typeName+"Map", "map[string]"+typeName, false) + fmt.Fprintf(w, "func (e %[1]s) To%[2]sOutput() %[3]sOutput {\n", typeName, asFuncName, elementArgsType) + fmt.Fprintf(w, "return pulumi.ToOutput(%[1]s(e)).(%[1]sOutput)\n", elementArgsType) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) - fmt.Fprintf(w, "func (o %[1]sMapOutput) MapIndex(k pulumi.StringInput) %[2]sOutput {\n", typeName, elementType) - fmt.Fprintf(w, "\treturn pulumi.All(o, k).ApplyT(func (vs []interface{}) %sOutput {\n", elementType) - fmt.Fprintf(w, "\t\treturn vs[0].(map[string]%s)[vs[1].(string)].To%sOutput()\n", typeName, asFuncName) - fmt.Fprintf(w, "\t}).(%sOutput)\n", elementType) - fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "func (e %[1]s) To%[2]sOutputWithContext(ctx context.Context) %[3]sOutput {\n", typeName, asFuncName, elementArgsType) + fmt.Fprintf(w, "return pulumi.ToOutputWithContext(ctx, %[1]s(e)).(%[1]sOutput)\n", elementArgsType) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) + + fmt.Fprintf(w, "func (e %[1]s) To%[2]sPtrOutput() %[3]sPtrOutput {\n", typeName, asFuncName, elementArgsType) + fmt.Fprintf(w, "return %s(e).To%sPtrOutputWithContext(context.Background())\n", elementArgsType, asFuncName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) + + fmt.Fprintf(w, "func (e %[1]s) To%[2]sPtrOutputWithContext(ctx 
context.Context) %[3]sPtrOutput {\n", typeName, asFuncName, elementArgsType) + fmt.Fprintf(w, "return %[1]s(e).To%[2]sOutputWithContext(ctx).To%[2]sPtrOutputWithContext(ctx)\n", elementArgsType, asFuncName) + fmt.Fprintln(w, "}") + fmt.Fprintln(w) +} + +func (pkg *pkgContext) assignProperty(w io.Writer, p *schema.Property, object, value string, indirectAssign bool) { + t := strings.TrimSuffix(pkg.typeString(p.Type), "Input") + switch codegen.UnwrapType(p.Type).(type) { + case *schema.EnumType: + t = "" + } + + if codegen.IsNOptionalInput(p.Type) { + if t != "" { + value = fmt.Sprintf("%s(%s)", t, value) + } + fmt.Fprintf(w, "\targs.%s = %s\n", Title(p.Name), value) + } else if indirectAssign { + tmpName := camel(p.Name) + "_" + fmt.Fprintf(w, "%s := %s\n", tmpName, value) + fmt.Fprintf(w, "%s.%s = &%s\n", object, Title(p.Name), tmpName) + } else { + fmt.Fprintf(w, "%s.%s = %s\n", object, Title(p.Name), value) } } @@ -785,33 +1219,87 @@ func (pkg *pkgContext) genPlainType(w io.Writer, name, comment, deprecationMessa fmt.Fprintf(w, "type %s struct {\n", name) for _, p := range properties { printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.plainType(p.Type, !p.IsRequired), p.Name) + fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.typeString(codegen.ResolvedType(p.Type)), p.Name) } fmt.Fprintf(w, "}\n\n") } +func (pkg *pkgContext) genPlainObjectDefaultFunc(w io.Writer, name string, + properties []*schema.Property) error { + defaults := []*schema.Property{} + for _, p := range properties { + if p.DefaultValue != nil || codegen.IsProvideDefaultsFuncRequired(p.Type) { + defaults = append(defaults, p) + } + } + + // There are no defaults, so we don't need to generate a defaults function. 
+ if len(defaults) == 0 { + return nil + } + + printComment(w, fmt.Sprintf("%s sets the appropriate defaults for %s", ProvideDefaultsMethodName, name), false) + fmt.Fprintf(w, "func (val *%[1]s) %[2]s() *%[1]s {\n", name, ProvideDefaultsMethodName) + fmt.Fprint(w, "if val == nil {\n return nil\n}\n") + fmt.Fprint(w, "tmp := *val\n") + for _, p := range defaults { + if p.DefaultValue != nil { + dv, err := pkg.getDefaultValue(p.DefaultValue, codegen.UnwrapType(p.Type)) + if err != nil { + return err + } + pkg.needsUtils = true + fmt.Fprintf(w, "if isZero(tmp.%s) {\n", Title(p.Name)) + pkg.assignProperty(w, p, "tmp", dv, !p.IsRequired()) + fmt.Fprintf(w, "}\n") + } else if funcName := pkg.provideDefaultsFuncName(p.Type); funcName != "" { + var member string + if codegen.IsNOptionalInput(p.Type) { + f := fmt.Sprintf("func(v %[1]s) %[1]s { return v.%[2]s*() }", name, funcName) + member = fmt.Sprintf("tmp.%[1]s.ApplyT(%[2]s)\n", Title(p.Name), f) + } else { + member = fmt.Sprintf("tmp.%[1]s.%[2]s()\n", Title(p.Name), funcName) + } + sigil := "" + if p.IsRequired() { + sigil = "*" + } + pkg.assignProperty(w, p, "tmp", sigil+member, false) + } else { + panic(fmt.Sprintf("Property %s[%s] should not be in the default list", p.Name, p.Type.String())) + } + } + + fmt.Fprintf(w, "return &tmp\n}\n") + return nil +} + +// The name of the method used to instantiate defaults. +const ProvideDefaultsMethodName = "Defaults" + +func (pkg *pkgContext) provideDefaultsFuncName(typ schema.Type) string { + if !codegen.IsProvideDefaultsFuncRequired(typ) { + return "" + } + return ProvideDefaultsMethodName +} + func (pkg *pkgContext) genInputTypes(w io.Writer, t *schema.ObjectType, details *typeDetails) { + contract.Assert(t.IsInputShape()) + name := pkg.tokenToType(t.Token) // Generate the plain inputs. 
- pkg.genInputInterface(w, name) + if details.input { + pkg.genInputInterface(w, name) - printComment(w, t.Comment, false) - fmt.Fprintf(w, "type %sArgs struct {\n", name) - for _, p := range t.Properties { - printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - typ := pkg.inputType(p.Type, !p.IsRequired) - if p.IsPlain { - typ = pkg.plainType(p.Type, !p.IsRequired) - } - fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), typ, p.Name) - } - fmt.Fprintf(w, "}\n\n") + pkg.genInputArgsStruct(w, name+"Args", t) - genInputMethods(w, name, name+"Args", name, details.ptrElement, false) + genInputImplementation(w, name, name+"Args", name, details.ptrInput) + } // Generate the pointer input. - if details.ptrElement { + if details.ptrInput { pkg.genInputInterface(w, name+"Ptr") ptrTypeName := camel(name) + "PtrType" @@ -822,97 +1310,94 @@ func (pkg *pkgContext) genInputTypes(w io.Writer, t *schema.ObjectType, details fmt.Fprintf(w, "\treturn (*%s)(v)\n", ptrTypeName) fmt.Fprintf(w, "}\n\n") - genInputMethods(w, name+"Ptr", "*"+ptrTypeName, "*"+name, false, false) + genInputImplementation(w, name+"Ptr", "*"+ptrTypeName, "*"+name, false) } // Generate the array input. - if details.arrayElement { + if details.arrayInput { pkg.genInputInterface(w, name+"Array") fmt.Fprintf(w, "type %[1]sArray []%[1]sInput\n\n", name) - genInputMethods(w, name+"Array", name+"Array", "[]"+name, false, false) + genInputImplementation(w, name+"Array", name+"Array", "[]"+name, false) } // Generate the map input. 
- if details.mapElement { + if details.mapInput { pkg.genInputInterface(w, name+"Map") fmt.Fprintf(w, "type %[1]sMap map[string]%[1]sInput\n\n", name) - genInputMethods(w, name+"Map", name+"Map", "map[string]"+name, false, false) + genInputImplementation(w, name+"Map", name+"Map", "map[string]"+name, false) } } -func genOutputMethods(w io.Writer, name, elementType string, resourceType bool) { - fmt.Fprintf(w, "func (%sOutput) ElementType() reflect.Type {\n", name) - if resourceType { - fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s)(nil))\n", elementType) - } else { - fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s)(nil)).Elem()\n", elementType) - } - fmt.Fprintf(w, "}\n\n") - - fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sOutput() %[1]sOutput {\n", name, Title(name)) - fmt.Fprintf(w, "\treturn o\n") - fmt.Fprintf(w, "}\n\n") +func (pkg *pkgContext) genInputArgsStruct(w io.Writer, typeName string, t *schema.ObjectType) { + contract.Assert(t.IsInputShape()) - fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sOutputWithContext(ctx context.Context) %[1]sOutput {\n", name, Title(name)) - fmt.Fprintf(w, "\treturn o\n") + printComment(w, t.Comment, false) + fmt.Fprintf(w, "type %s struct {\n", typeName) + for _, p := range t.Properties { + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) + fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.typeString(p.Type), p.Name) + } fmt.Fprintf(w, "}\n\n") } -func (pkg *pkgContext) genOutputTypes(w io.Writer, t *schema.ObjectType, details *typeDetails) { - name := pkg.tokenToType(t.Token) +type genOutputTypesArgs struct { + t *schema.ObjectType - printComment(w, t.Comment, false) - fmt.Fprintf(w, "type %sOutput struct { *pulumi.OutputState }\n\n", name) + // optional type name override + name string +} - genOutputMethods(w, name, name, false) +func (pkg *pkgContext) genOutputTypes(w io.Writer, genArgs genOutputTypesArgs) { + t := genArgs.t + details := pkg.detailsForType(t) - if details.ptrElement { - 
fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutput() %[1]sPtrOutput {\n", name, Title(name)) - fmt.Fprintf(w, "\treturn o.To%sPtrOutputWithContext(context.Background())\n", Title(name)) - fmt.Fprintf(w, "}\n\n") + contract.Assert(!t.IsInputShape()) - fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutputWithContext(ctx context.Context) %[1]sPtrOutput {\n", name, Title(name)) - fmt.Fprintf(w, "\treturn o.ApplyT(func(v %[1]s) *%[1]s {\n", name) - fmt.Fprintf(w, "\t\treturn &v\n") - fmt.Fprintf(w, "\t}).(%sPtrOutput)\n", name) - fmt.Fprintf(w, "}\n") + name := genArgs.name + if name == "" { + name = pkg.tokenToType(t.Token) } - for _, p := range t.Properties { - printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, false) - outputType, applyType := pkg.outputType(p.Type, !p.IsRequired), pkg.plainType(p.Type, !p.IsRequired) + if details.output { + printComment(w, t.Comment, false) + genOutputType(w, + name, /* baseName */ + name, /* elementType */ + details.ptrInput, /* ptrMethods */ + ) - propName := Title(p.Name) - switch strings.ToLower(p.Name) { - case "elementtype", "issecret": - propName = "Get" + propName + for _, p := range t.Properties { + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, false) + outputType, applyType := pkg.outputType(p.Type), pkg.typeString(p.Type) + + propName := Title(p.Name) + switch strings.ToLower(p.Name) { + case "elementtype", "issecret": + propName = "Get" + propName + } + fmt.Fprintf(w, "func (o %sOutput) %s() %s {\n", name, propName, outputType) + fmt.Fprintf(w, "\treturn o.ApplyT(func (v %s) %s { return v.%s }).(%s)\n", + name, applyType, Title(p.Name), outputType) + fmt.Fprintf(w, "}\n\n") } - fmt.Fprintf(w, "func (o %sOutput) %s() %s {\n", name, propName, outputType) - fmt.Fprintf(w, "\treturn o.ApplyT(func (v %s) %s { return v.%s }).(%s)\n", name, applyType, Title(p.Name), outputType) - fmt.Fprintf(w, "}\n\n") } - if details.ptrElement { - fmt.Fprintf(w, "type %sPtrOutput struct { 
*pulumi.OutputState }\n\n", name) - - genOutputMethods(w, name+"Ptr", "*"+name, false) - - fmt.Fprintf(w, "func (o %[1]sPtrOutput) Elem() %[1]sOutput {\n", name) - fmt.Fprintf(w, "\treturn o.ApplyT(func (v *%[1]s) %[1]s { return *v }).(%[1]sOutput)\n", name) - fmt.Fprintf(w, "}\n\n") + if details.ptrOutput { + genPtrOutput(w, name, name) for _, p := range t.Properties { printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, false) - outputType, applyType := pkg.outputType(p.Type, true), pkg.plainType(p.Type, true) + optionalType := codegen.OptionalType(p) + outputType, applyType := pkg.outputType(optionalType), pkg.typeString(optionalType) deref := "" // If the property was required, but the type it needs to return is an explicit pointer type, then we need // to dereference it, unless it is a resource type which should remain a pointer. _, isResourceType := p.Type.(*schema.ResourceType) - if p.IsRequired && applyType[0] == '*' && !isResourceType { + if p.IsRequired() && applyType[0] == '*' && !isResourceType { deref = "&" } @@ -934,28 +1419,12 @@ func (pkg *pkgContext) genOutputTypes(w io.Writer, t *schema.ObjectType, details } } - if details.arrayElement { - fmt.Fprintf(w, "type %sArrayOutput struct { *pulumi.OutputState }\n\n", name) - - genOutputMethods(w, name+"Array", "[]"+name, false) - - fmt.Fprintf(w, "func (o %[1]sArrayOutput) Index(i pulumi.IntInput) %[1]sOutput {\n", name) - fmt.Fprintf(w, "\treturn pulumi.All(o, i).ApplyT(func (vs []interface{}) %s {\n", name) - fmt.Fprintf(w, "\t\treturn vs[0].([]%s)[vs[1].(int)]\n", name) - fmt.Fprintf(w, "\t}).(%sOutput)\n", name) - fmt.Fprintf(w, "}\n\n") + if details.arrayOutput { + genArrayOutput(w, name, name) } - if details.mapElement { - fmt.Fprintf(w, "type %sMapOutput struct { *pulumi.OutputState }\n\n", name) - - genOutputMethods(w, name+"Map", "map[string]"+name, false) - - fmt.Fprintf(w, "func (o %[1]sMapOutput) MapIndex(k pulumi.StringInput) %[1]sOutput {\n", name) - fmt.Fprintf(w, 
"\treturn pulumi.All(o, k).ApplyT(func (vs []interface{}) %s {\n", name) - fmt.Fprintf(w, "\t\treturn vs[0].(map[string]%s)[vs[1].(string)]\n", name) - fmt.Fprintf(w, "\t}).(%sOutput)\n", name) - fmt.Fprintf(w, "}\n\n") + if details.mapOutput { + genMapOutput(w, name, name) } } @@ -976,11 +1445,15 @@ func goPrimitiveValue(value interface{}) (string, error) { case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: return strconv.FormatUint(v.Uint(), 10), nil case reflect.Float32, reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil + value := strconv.FormatFloat(v.Float(), 'f', -1, 64) + if !strings.ContainsRune(value, '.') { + value += ".0" + } + return value, nil case reflect.String: return fmt.Sprintf("%q", v.String()), nil default: - return "", errors.Errorf("unsupported default value of type %T", value) + return "", fmt.Errorf("unsupported default value of type %T", value) } } @@ -1005,13 +1478,18 @@ func (pkg *pkgContext) getDefaultValue(dv *schema.DefaultValue, t schema.Type) ( return "", err } val = v + switch t.(type) { + case *schema.EnumType: + typeName := strings.TrimSuffix(pkg.typeString(codegen.UnwrapType(t)), "Input") + val = fmt.Sprintf("%s(%s)", typeName, val) + } } if len(dv.Environment) > 0 { pkg.needsUtils = true parser, typDefault, typ := "nil", "\"\"", "string" - switch t.(type) { + switch codegen.UnwrapType(t).(type) { case *schema.ArrayType: parser, typDefault, typ = "parseEnvStringArray", "pulumi.StringArray{}", "pulumi.StringArray" } @@ -1039,7 +1517,7 @@ func (pkg *pkgContext) getDefaultValue(dv *schema.DefaultValue, t schema.Type) ( } func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateResourceContainerTypes bool) error { - name := resourceName(r) + name := disambiguatedResourceName(r, pkg) printCommentWithDeprecationMessage(w, r.Comment, r.DeprecationMessage, false) fmt.Fprintf(w, "type %s struct {\n", name) @@ -1054,9 +1532,10 @@ func (pkg *pkgContext) genResource(w io.Writer, r 
*schema.Resource, generateReso } var secretProps []*schema.Property + for _, p := range r.Properties { printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.outputType(p.Type, !p.IsRequired), p.Name) + fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.outputType(p.Type), p.Name) if p.Secret { secretProps = append(secretProps, p) @@ -1072,7 +1551,7 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso // Ensure required arguments are present. hasRequired := false for _, p := range r.InputProperties { - if p.IsRequired { + if p.IsRequired() { hasRequired = true } } @@ -1087,74 +1566,57 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso fmt.Fprintf(w, "\t}\n\n") // Produce the inputs. + + // Check all required inputs are present for _, p := range r.InputProperties { - switch p.Type.(type) { - case *schema.EnumType: - // not a pointer type and already handled above - default: - if p.IsRequired && !p.IsPlain { - fmt.Fprintf(w, "\tif args.%s == nil {\n", Title(p.Name)) - fmt.Fprintf(w, "\t\treturn nil, errors.New(\"invalid value for required argument '%s'\")\n", Title(p.Name)) - fmt.Fprintf(w, "\t}\n") - } + if p.IsRequired() && isNilType(p.Type) && p.DefaultValue == nil { + fmt.Fprintf(w, "\tif args.%s == nil {\n", Title(p.Name)) + fmt.Fprintf(w, "\t\treturn nil, errors.New(\"invalid value for required argument '%s'\")\n", Title(p.Name)) + fmt.Fprintf(w, "\t}\n") } } + assign := func(p *schema.Property, value string) { + pkg.assignProperty(w, p, "args", value, isNilType(p.Type)) + } + for _, p := range r.InputProperties { if p.ConstValue != nil { v, err := pkg.getConstValue(p.ConstValue) if err != nil { return err } - - t := strings.TrimSuffix(pkg.inputType(p.Type, !p.IsRequired), "Input") - if t == "pulumi." 
{ - t = "pulumi.Any" - } - - fmt.Fprintf(w, "\targs.%s = %s(%s)\n", Title(p.Name), t, v) - } - if p.DefaultValue != nil { - v, err := pkg.getDefaultValue(p.DefaultValue, p.Type) + assign(p, v) + } else if p.DefaultValue != nil { + dv, err := pkg.getDefaultValue(p.DefaultValue, codegen.UnwrapType(p.Type)) if err != nil { return err } - - t := strings.TrimSuffix(pkg.inputType(p.Type, !p.IsRequired), "Input") - if t == "pulumi." { - t = "pulumi.Any" + pkg.needsUtils = true + fmt.Fprintf(w, "\tif isZero(args.%s) {\n", Title(p.Name)) + assign(p, dv) + fmt.Fprintf(w, "\t}\n") + } else if name := pkg.provideDefaultsFuncName(p.Type); name != "" && !pkg.disableObjectDefaults { + optionalDeref := "" + if p.IsRequired() { + optionalDeref = "*" } - switch typ := p.Type.(type) { - case *schema.EnumType: - if p.IsRequired { - switch typ.ElementType { - // Only string and numeric types are supported for enums - case schema.StringType: - fmt.Fprintf(w, "\tif args.%s == \"\" {\n", Title(p.Name)) - case schema.IntType, schema.NumberType: - fmt.Fprintf(w, "\tif args.%s == 0 {\n", Title(p.Name)) - default: - contract.Assertf(false, "unxpected type %T for enum: %s", typ, typ.Token) - } - fmt.Fprintf(w, "\t\targs.%s = %s(%s)\n", Title(p.Name), t, v) - fmt.Fprintf(w, "\t}\n") - } else { - fmt.Fprintf(w, "\tif args.%s == nil {\n", Title(p.Name)) - - // Enum types are themselves inputs so pkg.InputType() returns * - // when the type is optional. We want the generated code to look like this: - // e:= () - // args. 
= &e - fmt.Fprintf(w, "\te := %s(%s)\n", pkg.inputType(p.Type, false), v) - fmt.Fprintf(w, "\t\targs.%s = &e\n", Title(p.Name)) - fmt.Fprintf(w, "\t}\n") - } - default: - fmt.Fprintf(w, "\tif args.%s == nil {\n", Title(p.Name)) - fmt.Fprintf(w, "\t\targs.%s = %s(%s)\n", Title(p.Name), t, v) - fmt.Fprintf(w, "\t}\n") + toOutputMethod := pkg.toOutputMethod(p.Type) + outputType := pkg.outputType(p.Type) + resolvedType := pkg.typeString(codegen.ResolvedType(p.Type)) + originalValue := fmt.Sprintf("args.%s.%s()", Title(p.Name), toOutputMethod) + valueWithDefaults := fmt.Sprintf("%[1]v.ApplyT(func (v %[2]s) %[2]s { return %[3]sv.%[4]s() }).(%[5]s)", + originalValue, resolvedType, optionalDeref, name, outputType) + + if !p.IsRequired() { + fmt.Fprintf(w, "if args.%s != nil {\n", Title(p.Name)) + fmt.Fprintf(w, "args.%[1]s = %s\n", Title(p.Name), valueWithDefaults) + fmt.Fprint(w, "}\n") + } else { + fmt.Fprintf(w, "args.%[1]s = %s\n", Title(p.Name), valueWithDefaults) } + } } @@ -1178,10 +1640,12 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso fmt.Fprintf(w, "\t})\n") fmt.Fprintf(w, "\topts = append(opts, aliases)\n") } + + // Setup secrets if len(secretProps) > 0 { for _, p := range secretProps { fmt.Fprintf(w, "\tif args.%s != nil {\n", Title(p.Name)) - fmt.Fprintf(w, "\t\targs.%[1]s = pulumi.ToSecret(args.%[1]s).(%[2]s)\n", Title(p.Name), pkg.outputType(p.Type, false)) + fmt.Fprintf(w, "\t\targs.%[1]s = pulumi.ToSecret(args.%[1]s).(%[2]s)\n", Title(p.Name), pkg.outputType(p.Type)) fmt.Fprintf(w, "\t}\n") } fmt.Fprintf(w, "\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n") @@ -1192,6 +1656,24 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso fmt.Fprintf(w, "\topts = append(opts, secrets)\n") } + // Setup replaceOnChange + replaceOnChangesProps, errList := r.ReplaceOnChanges() + for _, err := range errList { + cmdutil.Diag().Warningf(&diag.Diag{Message: err.Error()}) + } + 
replaceOnChangesStrings := schema.PropertyListJoinToString(replaceOnChangesProps, + func(x string) string { return x }) + if len(replaceOnChangesProps) > 0 { + fmt.Fprint(w, "\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n") + for _, p := range replaceOnChangesStrings { + fmt.Fprintf(w, "\t\t%q,\n", p) + } + fmt.Fprint(w, "\t})\n") + fmt.Fprint(w, "\topts = append(opts, replaceOnChanges)\n") + } + + pkg.GenPkgDefaultsOptsCall(w, false /*invoke*/) + // Finally make the call to registration. fmt.Fprintf(w, "\tvar resource %s\n", name) if r.IsComponent { @@ -1222,16 +1704,20 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso // Emit the state types for get methods. fmt.Fprintf(w, "// Input properties used for looking up and filtering %s resources.\n", name) fmt.Fprintf(w, "type %sState struct {\n", camel(name)) - for _, p := range r.Properties { - printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.plainType(p.Type, true), p.Name) + if r.StateInputs != nil { + for _, p := range r.StateInputs.Properties { + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) + fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.typeString(codegen.ResolvedType(codegen.OptionalType(p))), p.Name) + } } fmt.Fprintf(w, "}\n\n") fmt.Fprintf(w, "type %sState struct {\n", name) - for _, p := range r.Properties { - printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - fmt.Fprintf(w, "\t%s %s\n", Title(p.Name), pkg.inputType(p.Type, true)) + if r.StateInputs != nil { + for _, p := range r.StateInputs.Properties { + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) + fmt.Fprintf(w, "\t%s %s\n", Title(p.Name), pkg.inputType(p.Type)) + } } fmt.Fprintf(w, "}\n\n") @@ -1244,7 +1730,7 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso fmt.Fprintf(w, 
"type %sArgs struct {\n", camel(name)) for _, p := range r.InputProperties { printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.plainType(p.Type, !p.IsRequired), p.Name) + fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.typeString(codegen.ResolvedType(p.Type)), p.Name) } fmt.Fprintf(w, "}\n\n") @@ -1252,114 +1738,218 @@ func (pkg *pkgContext) genResource(w io.Writer, r *schema.Resource, generateReso fmt.Fprintf(w, "type %sArgs struct {\n", name) for _, p := range r.InputProperties { printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) - typ := pkg.inputType(p.Type, !p.IsRequired) - if p.IsPlain { - typ = pkg.plainType(p.Type, !p.IsRequired) - } - fmt.Fprintf(w, "\t%s %s\n", Title(p.Name), typ) + fmt.Fprintf(w, "\t%s %s\n", Title(p.Name), pkg.typeString(p.Type)) } fmt.Fprintf(w, "}\n\n") fmt.Fprintf(w, "func (%sArgs) ElementType() reflect.Type {\n", name) fmt.Fprintf(w, "\treturn reflect.TypeOf((*%sArgs)(nil)).Elem()\n", camel(name)) - fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "}\n") - // Emit the resource input type. - fmt.Fprintf(w, "type %sInput interface {\n", name) - fmt.Fprintf(w, "\tpulumi.Input\n\n") - fmt.Fprintf(w, "\tTo%[1]sOutput() %[1]sOutput\n", name) - fmt.Fprintf(w, "\tTo%[1]sOutputWithContext(ctx context.Context) %[1]sOutput\n", name) - fmt.Fprintf(w, "}\n\n") + // Emit resource methods. + for _, method := range r.Methods { + methodName := Title(method.Name) + f := method.Function - genInputMethods(w, name, "*"+name, name, generateResourceContainerTypes, true) + shouldLiftReturn := pkg.liftSingleValueMethodReturns && f.Outputs != nil && len(f.Outputs.Properties) == 1 - if generateResourceContainerTypes { - // Emit the resource pointer input type. 
- fmt.Fprintf(w, "type %sPtrInput interface {\n", name) - fmt.Fprintf(w, "\tpulumi.Input\n\n") - fmt.Fprintf(w, "\tTo%[1]sPtrOutput() %[1]sPtrOutput\n", name) - fmt.Fprintf(w, "\tTo%[1]sPtrOutputWithContext(ctx context.Context) %[1]sPtrOutput\n", name) - fmt.Fprintf(w, "}\n\n") - ptrTypeName := camel(name) + "PtrType" - fmt.Fprintf(w, "type %s %sArgs\n\n", ptrTypeName, name) - genInputMethods(w, name+"Ptr", "*"+ptrTypeName, "*"+name, false, true) + var args []*schema.Property + if f.Inputs != nil { + for _, arg := range f.Inputs.InputShape.Properties { + if arg.Name == "__self__" { + continue + } + args = append(args, arg) + } + } - if !r.IsProvider { - // Generate the resource array input. - pkg.genInputInterface(w, name+"Array") - fmt.Fprintf(w, "type %[1]sArray []%[1]sInput\n\n", name) - genResourceContainerInput(w, name+"Array", name+"Array", "[]*"+name) + // Now emit the method signature. + argsig := "ctx *pulumi.Context" + if len(args) > 0 { + argsig = fmt.Sprintf("%s, args *%s%sArgs", argsig, name, methodName) + } + var retty string + if f.Outputs == nil { + retty = "error" + } else if shouldLiftReturn { + retty = fmt.Sprintf("(%s, error)", pkg.outputType(f.Outputs.Properties[0].Type)) + } else { + retty = fmt.Sprintf("(%s%sResultOutput, error)", name, methodName) + } + fmt.Fprintf(w, "\n") + printCommentWithDeprecationMessage(w, f.Comment, f.DeprecationMessage, false) + fmt.Fprintf(w, "func (r *%s) %s(%s) %s {\n", name, methodName, argsig, retty) - // Generate the resource map input. - pkg.genInputInterface(w, name+"Map") - fmt.Fprintf(w, "type %[1]sMap map[string]%[1]sInput\n\n", name) - genResourceContainerInput(w, name+"Map", name+"Map", "map[string]*"+name) + resultVar := "_" + if f.Outputs != nil { + resultVar = "out" } - } - // Emit the resource output type. 
- fmt.Fprintf(w, "type %sOutput struct {\n", name) - fmt.Fprintf(w, "\t*pulumi.OutputState\n") - fmt.Fprintf(w, "}\n\n") - genOutputMethods(w, name, name, true) - fmt.Fprintf(w, "\n") - if generateResourceContainerTypes { - fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutput() %[1]sPtrOutput {\n", name, Title(name)) - fmt.Fprintf(w, "\treturn o.To%sPtrOutputWithContext(context.Background())\n", Title(name)) - fmt.Fprintf(w, "}\n\n") + // Make a map of inputs to pass to the runtime function. + inputsVar := "nil" + if len(args) > 0 { + inputsVar = "args" + } - fmt.Fprintf(w, "func (o %[1]sOutput) To%[2]sPtrOutputWithContext(ctx context.Context) %[1]sPtrOutput {\n", name, Title(name)) - fmt.Fprintf(w, "\treturn o.ApplyT(func(v %[1]s) *%[1]s {\n", name) - fmt.Fprintf(w, "\t\treturn &v\n") - fmt.Fprintf(w, "\t}).(%sPtrOutput)\n", name) + // Now simply invoke the runtime function with the arguments. + outputsType := "pulumi.AnyOutput" + if f.Outputs != nil { + if shouldLiftReturn { + outputsType = fmt.Sprintf("%s%sResultOutput", camel(name), methodName) + } else { + outputsType = fmt.Sprintf("%s%sResultOutput", name, methodName) + } + } + fmt.Fprintf(w, "\t%s, err := ctx.Call(%q, %s, %s{}, r)\n", resultVar, f.Token, inputsVar, outputsType) + if f.Outputs == nil { + fmt.Fprintf(w, "\treturn err\n") + } else if shouldLiftReturn { + // Check the error before proceeding. + fmt.Fprintf(w, "\tif err != nil {\n") + fmt.Fprintf(w, "\t\treturn %s{}, err\n", pkg.outputType(f.Outputs.Properties[0].Type)) + fmt.Fprintf(w, "\t}\n") + + // Get the name of the method to return the output + fmt.Fprintf(w, "\treturn %s.(%s).%s(), nil\n", resultVar, camel(outputsType), Title(f.Outputs.Properties[0].Name)) + } else { + // Check the error before proceeding. + fmt.Fprintf(w, "\tif err != nil {\n") + fmt.Fprintf(w, "\t\treturn %s{}, err\n", outputsType) + fmt.Fprintf(w, "\t}\n") + + // Return the result. 
+ fmt.Fprintf(w, "\treturn %s.(%s), nil\n", resultVar, outputsType) + } fmt.Fprintf(w, "}\n") - fmt.Fprintf(w, "\n") - // Emit the resource pointer output type. - fmt.Fprintf(w, "type %sOutput struct {\n", name+"Ptr") - fmt.Fprintf(w, "\t*pulumi.OutputState\n") - fmt.Fprintf(w, "}\n\n") - genOutputMethods(w, name+"Ptr", "*"+name, true) - - if !r.IsProvider { - // Emit the array output type - fmt.Fprintf(w, "type %sArrayOutput struct { *pulumi.OutputState }\n\n", name) - genOutputMethods(w, name+"Array", "[]"+name, true) - fmt.Fprintf(w, "func (o %[1]sArrayOutput) Index(i pulumi.IntInput) %[1]sOutput {\n", name) - fmt.Fprintf(w, "\treturn pulumi.All(o, i).ApplyT(func (vs []interface{}) %s {\n", name) - fmt.Fprintf(w, "\t\treturn vs[0].([]%s)[vs[1].(int)]\n", name) - fmt.Fprintf(w, "\t}).(%sOutput)\n", name) + // If there are argument and/or return types, emit them. + if len(args) > 0 { + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "type %s%sArgs struct {\n", camel(name), methodName) + for _, p := range args { + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) + fmt.Fprintf(w, "\t%s %s `pulumi:\"%s\"`\n", Title(p.Name), pkg.typeString(codegen.ResolvedType(p.Type)), + p.Name) + } fmt.Fprintf(w, "}\n\n") - // Emit the map output type - fmt.Fprintf(w, "type %sMapOutput struct { *pulumi.OutputState }\n\n", name) - genOutputMethods(w, name+"Map", "map[string]"+name, true) - fmt.Fprintf(w, "func (o %[1]sMapOutput) MapIndex(k pulumi.StringInput) %[1]sOutput {\n", name) - fmt.Fprintf(w, "\treturn pulumi.All(o, k).ApplyT(func (vs []interface{}) %s {\n", name) - fmt.Fprintf(w, "\t\treturn vs[0].(map[string]%s)[vs[1].(string)]\n", name) - fmt.Fprintf(w, "\t}).(%sOutput)\n", name) + + fmt.Fprintf(w, "// The set of arguments for the %s method of the %s resource.\n", methodName, name) + fmt.Fprintf(w, "type %s%sArgs struct {\n", name, methodName) + for _, p := range args { + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, true) + 
fmt.Fprintf(w, "\t%s %s\n", Title(p.Name), pkg.typeString(p.Type)) + } + fmt.Fprintf(w, "}\n\n") + + fmt.Fprintf(w, "func (%s%sArgs) ElementType() reflect.Type {\n", name, methodName) + fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s%sArgs)(nil)).Elem()\n", camel(name), methodName) fmt.Fprintf(w, "}\n\n") } - } - // Register all output types - fmt.Fprintf(w, "func init() {\n") - fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sOutput{})\n", name) + if f.Outputs != nil { + outputStructName := name - if generateResourceContainerTypes { - fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sPtrOutput{})\n", name) - if !r.IsProvider { - fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sArrayOutput{})\n", name) - fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sMapOutput{})\n", name) + // Don't export the result struct if we're lifting the value + if shouldLiftReturn { + outputStructName = camel(name) + } + + fmt.Fprintf(w, "\n") + pkg.genPlainType(w, fmt.Sprintf("%s%sResult", outputStructName, methodName), f.Outputs.Comment, "", + f.Outputs.Properties) + + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "type %s%sResultOutput struct{ *pulumi.OutputState }\n\n", outputStructName, methodName) + + fmt.Fprintf(w, "func (%s%sResultOutput) ElementType() reflect.Type {\n", outputStructName, methodName) + fmt.Fprintf(w, "\treturn reflect.TypeOf((*%s%sResult)(nil)).Elem()\n", outputStructName, methodName) + fmt.Fprintf(w, "}\n") + + for _, p := range f.Outputs.Properties { + fmt.Fprintf(w, "\n") + printCommentWithDeprecationMessage(w, p.Comment, p.DeprecationMessage, false) + fmt.Fprintf(w, "func (o %s%sResultOutput) %s() %s {\n", outputStructName, methodName, Title(p.Name), + pkg.outputType(p.Type)) + fmt.Fprintf(w, "\treturn o.ApplyT(func(v %s%sResult) %s { return v.%s }).(%s)\n", outputStructName, methodName, + pkg.typeString(codegen.ResolvedType(p.Type)), Title(p.Name), pkg.outputType(p.Type)) + fmt.Fprintf(w, "}\n") + } } } + + // Emit the resource input type. 
+ fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "type %sInput interface {\n", name) + fmt.Fprintf(w, "\tpulumi.Input\n\n") + fmt.Fprintf(w, "\tTo%[1]sOutput() %[1]sOutput\n", name) + fmt.Fprintf(w, "\tTo%[1]sOutputWithContext(ctx context.Context) %[1]sOutput\n", name) fmt.Fprintf(w, "}\n\n") + genInputImplementation(w, name, "*"+name, "*"+name, false) + + if generateResourceContainerTypes && !r.IsProvider { + // Generate the resource array input. + pkg.genInputInterface(w, name+"Array") + fmt.Fprintf(w, "type %[1]sArray []%[1]sInput\n\n", name) + genInputImplementation(w, name+"Array", name+"Array", "[]*"+name, false) + + // Generate the resource map input. + pkg.genInputInterface(w, name+"Map") + fmt.Fprintf(w, "type %[1]sMap map[string]%[1]sInput\n\n", name) + genInputImplementation(w, name+"Map", name+"Map", "map[string]*"+name, false) + } + + // Emit the resource output type. + genOutputType(w, name, "*"+name, false) + + if generateResourceContainerTypes && !r.IsProvider { + genArrayOutput(w, name, "*"+name) + genMapOutput(w, name, "*"+name) + } + + pkg.genResourceRegistrations(w, r, generateResourceContainerTypes) + return nil } -func (pkg *pkgContext) genFunction(w io.Writer, f *schema.Function) { - // If the function starts with New or Get, it will conflict; so rename them. 
- name := pkg.functionNames[f] +func NeedsGoOutputVersion(f *schema.Function) bool { + fPkg := f.Package + + var goInfo GoPackageInfo + + contract.AssertNoError(fPkg.ImportLanguages(map[string]schema.Language{"go": Importer})) + if info, ok := fPkg.Language["go"].(GoPackageInfo); ok { + goInfo = info + } + + if goInfo.DisableFunctionOutputVersions { + return false + } + + return f.NeedsOutputVersion() +} + +func (pkg *pkgContext) genFunctionCodeFile(f *schema.Function) (string, error) { + importsAndAliases := map[string]string{} + pkg.getImports(f, importsAndAliases) + importsAndAliases["github.com/pulumi/pulumi/sdk/v3/go/pulumi"] = "" + + buffer := &bytes.Buffer{} + var imports []string + if NeedsGoOutputVersion(f) { + imports = []string{"context", "reflect"} + } + + pkg.genHeader(buffer, imports, importsAndAliases) + if err := pkg.genFunction(buffer, f); err != nil { + return "", err + } + pkg.genFunctionOutputVersion(buffer, f) + return buffer.String(), nil +} + +func (pkg *pkgContext) genFunction(w io.Writer, f *schema.Function) error { + name := pkg.functionName(f) printCommentWithDeprecationMessage(w, f.Comment, f.DeprecationMessage, false) // Now, emit the function signature. @@ -1379,6 +1969,8 @@ func (pkg *pkgContext) genFunction(w io.Writer, f *schema.Function) { var inputsVar string if f.Inputs == nil { inputsVar = "nil" + } else if codegen.IsProvideDefaultsFuncRequired(f.Inputs) && !pkg.disableObjectDefaults { + inputsVar = "args.Defaults()" } else { inputsVar = "args" } @@ -1390,6 +1982,9 @@ func (pkg *pkgContext) genFunction(w io.Writer, f *schema.Function) { } else { outputsType = name + "Result" } + + pkg.GenPkgDefaultsOptsCall(w, true /*invoke*/) + fmt.Fprintf(w, "\tvar rv %s\n", outputsType) fmt.Fprintf(w, "\terr := ctx.Invoke(\"%s\", %s, &rv, opts...)\n", f.Token, inputsVar) @@ -1402,167 +1997,502 @@ func (pkg *pkgContext) genFunction(w io.Writer, f *schema.Function) { fmt.Fprintf(w, "\t}\n") // Return the result. 
- fmt.Fprintf(w, "\treturn &rv, nil\n") + var retValue string + if codegen.IsProvideDefaultsFuncRequired(f.Outputs) && !pkg.disableObjectDefaults { + retValue = "rv.Defaults()" + } else { + retValue = "&rv" + } + fmt.Fprintf(w, "\treturn %s, nil\n", retValue) } fmt.Fprintf(w, "}\n") // If there are argument and/or return types, emit them. if f.Inputs != nil { fmt.Fprintf(w, "\n") - pkg.genPlainType(w, fmt.Sprintf("%sArgs", name), f.Inputs.Comment, "", f.Inputs.Properties) + fnInputsName := pkg.functionArgsTypeName(f) + pkg.genPlainType(w, fnInputsName, f.Inputs.Comment, "", f.Inputs.Properties) + if codegen.IsProvideDefaultsFuncRequired(f.Inputs) && !pkg.disableObjectDefaults { + if err := pkg.genPlainObjectDefaultFunc(w, fnInputsName, f.Inputs.Properties); err != nil { + return err + } + } } if f.Outputs != nil { fmt.Fprintf(w, "\n") - pkg.genPlainType(w, fmt.Sprintf("%sResult", name), f.Outputs.Comment, "", f.Outputs.Properties) + fnOutputsName := pkg.functionResultTypeName(f) + pkg.genPlainType(w, fnOutputsName, f.Outputs.Comment, "", f.Outputs.Properties) + if codegen.IsProvideDefaultsFuncRequired(f.Outputs) && !pkg.disableObjectDefaults { + if err := pkg.genPlainObjectDefaultFunc(w, fnOutputsName, f.Outputs.Properties); err != nil { + return err + } + } + } + return nil +} + +func (pkg *pkgContext) functionName(f *schema.Function) string { + // If the function starts with New or Get, it will conflict; so rename them. 
+ name, hasName := pkg.functionNames[f] + + if !hasName { + panic(fmt.Sprintf("No function name found for %v", f)) + } + + return name +} + +func (pkg *pkgContext) functionArgsTypeName(f *schema.Function) string { + name := pkg.functionName(f) + return fmt.Sprintf("%sArgs", name) +} + +func (pkg *pkgContext) functionResultTypeName(f *schema.Function) string { + name := pkg.functionName(f) + return fmt.Sprintf("%sResult", name) +} + +func (pkg *pkgContext) genFunctionOutputVersion(w io.Writer, f *schema.Function) { + if !NeedsGoOutputVersion(f) { + return + } + + originalName := pkg.functionName(f) + name := originalName + "Output" + originalResultTypeName := pkg.functionResultTypeName(f) + resultTypeName := originalResultTypeName + "Output" + + code := ` +func ${fn}Output(ctx *pulumi.Context, args ${fn}OutputArgs, opts ...pulumi.InvokeOption) ${outputType} { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (${fn}Result, error) { + args := v.(${fn}Args) + r, err := ${fn}(ctx, &args, opts...) + return *r, err + }).(${outputType}) +} + +` + code = strings.ReplaceAll(code, "${fn}", originalName) + code = strings.ReplaceAll(code, "${outputType}", resultTypeName) + fmt.Fprintf(w, code) + + pkg.genInputArgsStruct(w, name+"Args", f.Inputs.InputShape) + + genInputImplementationWithArgs(w, genInputImplementationArgs{ + name: name + "Args", + receiverType: name + "Args", + elementType: pkg.functionArgsTypeName(f), + }) + + pkg.genOutputTypes(w, genOutputTypesArgs{ + t: f.Outputs, + name: originalResultTypeName, + }) + + // Assuming the file represented by `w` only has one function, + // generate an `init()` for Output type init. 
+ initCode := ` +func init() { + pulumi.RegisterOutputType(${outputType}{}) +} + +` + initCode = strings.ReplaceAll(initCode, "${outputType}", resultTypeName) + fmt.Fprintf(w, initCode) +} + +type objectProperty struct { + object *schema.ObjectType + property *schema.Property +} + +// When computing the type name for a field of an object type, we must ensure that we do not generate invalid recursive +// struct types. A struct type T contains invalid recursion if the closure of its fields and its struct-typed fields' +// fields includes a field of type T. A few examples: +// +// Directly invalid: +// +// type T struct { +// Invalid T +// } +// +// Indirectly invalid: +// +// type T struct { +// Invalid S +// } +// +// type S struct { +// Invalid T +// } +// +// In order to avoid generating invalid struct types, we replace all references to types involved in a cyclical +// definition with *T. The examples above therefore become: +// +// (1) +// type T struct { +// Valid *T +// } +// +// (2) +// type T struct { +// Valid *S +// } +// +// type S struct { +// Valid *T +// } +// +// We do this using a rewriter that turns all fields involved in reference cycles into optional fields. +func rewriteCyclicField(rewritten codegen.Set, path []objectProperty, op objectProperty) { + // If this property refers to an Input<> type, unwrap the type. This ensures that the plain and input shapes of an + // object type remain identical. + t := op.property.Type + if inputType, isInputType := op.property.Type.(*schema.InputType); isInputType { + t = inputType.ElementType + } + + // If this property does not refer to an object type, it cannot be involved in a cycle. Skip it. + objectType, isObjectType := t.(*schema.ObjectType) + if !isObjectType { + return + } + + path = append(path, op) + + // Check the current path for cycles by crawling backwards until reaching the start of the path + // or finding a property that is a member of the current object type. 
+ var cycle []objectProperty + for i := len(path) - 1; i > 0; i-- { + if path[i].object == objectType { + cycle = path[i:] + break + } + } + + // If the current path does not involve a cycle, recur into the current object type. + if len(cycle) == 0 { + rewriteCyclicFields(rewritten, path, objectType) + return + } + + // If we've found a cycle, mark each property involved in the cycle as optional. + // + // NOTE: this overestimates the set of properties that must be marked as optional. For example, in case (2) above, + // only one of T.Invalid or S.Invalid needs to be marked as optional in order to break the cycle. However, choosing + // a minimal set of properties that is also deterministic and resilient to changes in visit order is difficult and + // seems to add little value. + for _, p := range cycle { + p.property.Type = codegen.OptionalType(p.property) + } +} + +func rewriteCyclicFields(rewritten codegen.Set, path []objectProperty, obj *schema.ObjectType) { + if !rewritten.Has(obj) { + rewritten.Add(obj) + for _, property := range obj.Properties { + rewriteCyclicField(rewritten, path, objectProperty{obj, property}) + } + } +} + +func rewriteCyclicObjectFields(pkg *schema.Package) { + rewritten := codegen.Set{} + for _, t := range pkg.Types { + if obj, ok := t.(*schema.ObjectType); ok && !obj.IsInputShape() { + rewriteCyclicFields(rewritten, nil, obj) + rewriteCyclicFields(rewritten, nil, obj.InputShape) + } } } -func (pkg *pkgContext) genType(w io.Writer, obj *schema.ObjectType) { - pkg.genPlainType(w, pkg.tokenToType(obj.Token), obj.Comment, "", obj.Properties) - pkg.genInputTypes(w, obj, pkg.detailsForType(obj)) - pkg.genOutputTypes(w, obj, pkg.detailsForType(obj)) +func (pkg *pkgContext) genType(w io.Writer, obj *schema.ObjectType) error { + contract.Assert(!obj.IsInputShape()) + if obj.IsOverlay { + // This type is generated by the provider, so no further action is required. 
+ return nil + } + + plainName := pkg.tokenToType(obj.Token) + pkg.genPlainType(w, plainName, obj.Comment, "", obj.Properties) + if !pkg.disableObjectDefaults { + if err := pkg.genPlainObjectDefaultFunc(w, plainName, obj.Properties); err != nil { + return err + } + } + + pkg.genInputTypes(w, obj.InputShape, pkg.detailsForType(obj)) + pkg.genOutputTypes(w, genOutputTypesArgs{t: obj}) + return nil } func (pkg *pkgContext) addSuffixesToName(typ schema.Type, name string) []string { var names []string details := pkg.detailsForType(typ) - if details.arrayElement { - names = append(names, name+"Array") + if details.arrayInput { + names = append(names, name+"ArrayInput") + } + if details.arrayOutput || details.arrayInput { + names = append(names, name+"ArrayOutput") } - if details.mapElement { - names = append(names, name+"Map") + if details.mapInput { + names = append(names, name+"MapInput") + } + if details.mapOutput || details.mapInput { + names = append(names, name+"MapOutput") } return names } -func (pkg *pkgContext) genNestedCollectionType(w io.Writer, typ schema.Type) []string { +type nestedTypeInfo struct { + resolvedElementType string + names map[string]bool +} + +// collectNestedCollectionTypes builds a deduped mapping of element types -> associated collection types. +// different shapes of known types can resolve to the same element type. by collecting types in one step and emitting types +// in a second step, we avoid collision and redeclaration. 
+func (pkg *pkgContext) collectNestedCollectionTypes(types map[string]*nestedTypeInfo, typ schema.Type) { var elementTypeName string var names []string switch t := typ.(type) { case *schema.ArrayType: // Builtins already cater to primitive arrays if schema.IsPrimitiveType(t.ElementType) { - return nil + return } elementTypeName = pkg.nestedTypeToType(t.ElementType) - elementTypeName += "Array" + elementTypeName = strings.TrimSuffix(elementTypeName, "Args") + "Array" names = pkg.addSuffixesToName(t, elementTypeName) case *schema.MapType: // Builtins already cater to primitive maps if schema.IsPrimitiveType(t.ElementType) { - return nil + return } elementTypeName = pkg.nestedTypeToType(t.ElementType) - elementTypeName += "Map" + elementTypeName = strings.TrimSuffix(elementTypeName, "Args") + "Map" names = pkg.addSuffixesToName(t, elementTypeName) + default: + contract.Failf("unexpected type %T in collectNestedCollectionTypes", t) } - - for _, name := range names { - if strings.HasSuffix(name, "Array") { - fmt.Fprintf(w, "type %s []%sInput\n\n", name, elementTypeName) - genInputMethods(w, name, name, elementTypeName, false, false) - - fmt.Fprintf(w, "type %sOutput struct { *pulumi.OutputState }\n\n", name) - genOutputMethods(w, name, elementTypeName, false) - - fmt.Fprintf(w, "func (o %sOutput) Index(i pulumi.IntInput) %sOutput {\n", name, elementTypeName) - fmt.Fprintf(w, "\treturn pulumi.All(o, i).ApplyT(func (vs []interface{}) %s {\n", elementTypeName) - fmt.Fprintf(w, "\t\treturn vs[0].([]%s)[vs[1].(int)]\n", elementTypeName) - fmt.Fprintf(w, "\t}).(%sOutput)\n", elementTypeName) - fmt.Fprintf(w, "}\n\n") + nti, ok := types[elementTypeName] + if !ok { + nti = &nestedTypeInfo{ + names: map[string]bool{}, + resolvedElementType: pkg.typeString(codegen.ResolvedType(typ)), } + types[elementTypeName] = nti + } + for _, n := range names { + nti.names[n] = true + } +} - if strings.HasSuffix(name, "Map") { - fmt.Fprintf(w, "type %s map[string]%sInput\n\n", name, 
elementTypeName) - genInputMethods(w, name, name, elementTypeName, false, false) +// genNestedCollectionTypes emits nested collection types given the deduped mapping of element types -> associated collection types. +// different shapes of known types can resolve to the same element type. by collecting types in one step and emitting types +// in a second step, we avoid collision and redeclaration. +func (pkg *pkgContext) genNestedCollectionTypes(w io.Writer, types map[string]*nestedTypeInfo) []string { + var names []string - fmt.Fprintf(w, "type %sOutput struct { *pulumi.OutputState }\n\n", name) - genOutputMethods(w, name, elementTypeName, false) + // map iteration is unstable so sort items for deterministic codegen + sortedElems := []string{} + for k := range types { + sortedElems = append(sortedElems, k) + } + sort.Strings(sortedElems) - // Emit the map output type - fmt.Fprintf(w, "func (o %sOutput) MapIndex(k pulumi.StringInput) %sOutput {\n", name, elementTypeName) - fmt.Fprintf(w, "\treturn pulumi.All(o, k).ApplyT(func (vs []interface{}) %s {\n", elementTypeName) - fmt.Fprintf(w, "\t\treturn vs[0].(map[string]%s)[vs[1].(string)]\n", elementTypeName) - fmt.Fprintf(w, "\t}).(%sOutput)\n", elementTypeName) - fmt.Fprintf(w, "}\n\n") + for _, elementTypeName := range sortedElems { + info := types[elementTypeName] + + collectionTypes := []string{} + for k := range info.names { + collectionTypes = append(collectionTypes, k) + } + sort.Strings(collectionTypes) + for _, name := range collectionTypes { + names = append(names, name) + switch { + case strings.HasSuffix(name, "ArrayInput"): + name = strings.TrimSuffix(name, "Input") + fmt.Fprintf(w, "type %s []%sInput\n\n", name, elementTypeName) + genInputImplementation(w, name, name, "[]"+info.resolvedElementType, false) + + pkg.genInputInterface(w, name) + case strings.HasSuffix(name, "ArrayOutput"): + genArrayOutput(w, strings.TrimSuffix(name, "ArrayOutput"), info.resolvedElementType) + case strings.HasSuffix(name, 
"MapInput"): + name = strings.TrimSuffix(name, "Input") + fmt.Fprintf(w, "type %s map[string]%sInput\n\n", name, elementTypeName) + genInputImplementation(w, name, name, "map[string]"+info.resolvedElementType, false) + + pkg.genInputInterface(w, name) + case strings.HasSuffix(name, "MapOutput"): + genMapOutput(w, strings.TrimSuffix(name, "MapOutput"), info.resolvedElementType) + } } - pkg.genInputInterface(w, name) } return names } func (pkg *pkgContext) nestedTypeToType(typ schema.Type) string { - switch t := typ.(type) { + switch t := codegen.UnwrapType(typ).(type) { case *schema.ArrayType: return pkg.nestedTypeToType(t.ElementType) case *schema.MapType: return pkg.nestedTypeToType(t.ElementType) + case *schema.ObjectType: + return pkg.resolveObjectType(t) } - return pkg.tokenToType(typ.String()) + return strings.TrimSuffix(pkg.tokenToType(typ.String()), "Args") } -func (pkg *pkgContext) tokenToEnum(tok string) string { - // token := pkg : module : member - // module := path/to/module - - components := strings.Split(tok, ":") - contract.Assert(len(components) == 3) - if pkg == nil { - panic(fmt.Errorf("pkg is nil. token %s", tok)) - } - if pkg.pkg == nil { - panic(fmt.Errorf("pkg.pkg is nil. token %s", tok)) - } - - mod, name := pkg.tokenToPackage(tok), components[2] - - modPkg, ok := pkg.packages[mod] - name = Title(name) - - if ok { - newName, renamed := modPkg.renamed[name] - if renamed { - name = newName - } else if modPkg.names.Has(name) { - // If the package containing the enum's token already has a resource with the - // same name, add a `Enum` suffix. - newName := name + "Enum" - modPkg.renamed[name] = newName - modPkg.names.Add(newName) - name = newName +func (pkg *pkgContext) genTypeRegistrations(w io.Writer, objTypes []*schema.ObjectType, types ...string) { + fmt.Fprintf(w, "func init() {\n") + + // Input types. 
+ if !pkg.disableInputTypeRegistrations { + for _, obj := range objTypes { + if obj.IsOverlay { + // This type is generated by the provider, so no further action is required. + continue + } + name, details := pkg.tokenToType(obj.Token), pkg.detailsForType(obj) + if details.input { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sInput)(nil)).Elem(), %[1]sArgs{})\n", name) + } + if details.ptrInput { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sPtrInput)(nil)).Elem(), %[1]sArgs{})\n", name) + } + if details.arrayInput { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sArrayInput)(nil)).Elem(), %[1]sArray{})\n", name) + } + if details.mapInput { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sMapInput)(nil)).Elem(), %[1]sMap{})\n", name) + } + } + for _, t := range types { + if strings.HasSuffix(t, "Input") { + fmt.Fprintf(w, "\tpulumi.RegisterInputType(reflect.TypeOf((*%s)(nil)).Elem(), %s{})\n", t, strings.TrimSuffix(t, "Input")) + } } } - if mod == pkg.mod { - return name + // Output types. + for _, obj := range objTypes { + if obj.IsOverlay { + // This type is generated by the provider, so no further action is required. + continue + } + name, details := pkg.tokenToType(obj.Token), pkg.detailsForType(obj) + if details.output { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sOutput{})\n", name) + } + if details.ptrOutput { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sPtrOutput{})\n", name) + } + if details.arrayOutput { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sArrayOutput{})\n", name) + } + if details.mapOutput { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sMapOutput{})\n", name) + } } - if mod == "" { - mod = components[0] + for _, t := range types { + if strings.HasSuffix(t, "Output") { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%s{})\n", t) + } } - return strings.Replace(mod, "/", "", -1) + "." 
+ name + + fmt.Fprintf(w, "}\n") } -func (pkg *pkgContext) genTypeRegistrations(w io.Writer, objTypes []*schema.ObjectType, types ...string) { +func (pkg *pkgContext) genEnumRegistrations(w io.Writer) { fmt.Fprintf(w, "func init() {\n") - for _, obj := range objTypes { - name, details := pkg.tokenToType(obj.Token), pkg.detailsForType(obj) - fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sOutput{})\n", name) - if details.ptrElement { + // Register all input types + if !pkg.disableInputTypeRegistrations { + for _, e := range pkg.enums { + // Enums are guaranteed to have at least one element when they are + // bound into a schema. + contract.Assert(len(e.Elements) > 0) + name, details := pkg.tokenToEnum(e.Token), pkg.detailsForType(e) + instance := fmt.Sprintf("%#v", e.Elements[0].Value) + if details.input || details.ptrInput { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sInput)(nil)).Elem(), %[1]s(%[2]s))\n", + name, instance) + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sPtrInput)(nil)).Elem(), %[1]s(%[2]s))\n", + name, instance) + } + if details.arrayInput { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sArrayInput)(nil)).Elem(), %[1]sArray{})\n", + name) + } + if details.mapInput { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sMapInput)(nil)).Elem(), %[1]sMap{})\n", + name) + } + } + } + // Register all output types + for _, e := range pkg.enums { + name, details := pkg.tokenToEnum(e.Token), pkg.detailsForType(e) + if details.output || details.ptrOutput { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sOutput{})\n", name) fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sPtrOutput{})\n", name) } - if details.arrayElement { + if details.arrayOutput { fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sArrayOutput{})\n", name) } - if details.mapElement { + if details.mapOutput { fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sMapOutput{})\n", name) } } + fmt.Fprintf(w, "}\n\n") +} - for _, 
t := range types { - fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sOutput{})\n", t) +func (pkg *pkgContext) genResourceRegistrations(w io.Writer, r *schema.Resource, generateResourceContainerTypes bool) { + name := disambiguatedResourceName(r, pkg) + fmt.Fprintf(w, "func init() {\n") + // Register input type + if !pkg.disableInputTypeRegistrations { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sInput)(nil)).Elem(), &%[1]s{})\n", + name) + if generateResourceContainerTypes && !r.IsProvider { + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sArrayInput)(nil)).Elem(), %[1]sArray{})\n", + name) + fmt.Fprintf(w, + "\tpulumi.RegisterInputType(reflect.TypeOf((*%[1]sMapInput)(nil)).Elem(), %[1]sMap{})\n", + name) + } } - fmt.Fprintf(w, "}\n") + // Register all output types + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sOutput{})\n", name) + for _, method := range r.Methods { + if method.Function.Outputs != nil { + if pkg.liftSingleValueMethodReturns && len(method.Function.Outputs.Properties) == 1 { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%s%sResultOutput{})\n", camel(name), Title(method.Name)) + } else { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%s%sResultOutput{})\n", name, Title(method.Name)) + } + } + } + + if generateResourceContainerTypes && !r.IsProvider { + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sArrayOutput{})\n", name) + fmt.Fprintf(w, "\tpulumi.RegisterOutputType(%sMapOutput{})\n", name) + } + fmt.Fprintf(w, "}\n\n") } func (pkg *pkgContext) getTypeImports(t schema.Type, recurse bool, importsAndAliases map[string]string, seen map[schema.Type]struct{}) { @@ -1571,6 +2501,10 @@ func (pkg *pkgContext) getTypeImports(t schema.Type, recurse bool, importsAndAli } seen[t] = struct{}{} switch t := t.(type) { + case *schema.OptionalType: + pkg.getTypeImports(t.ElementType, recurse, importsAndAliases, seen) + case *schema.InputType: + pkg.getTypeImports(t.ElementType, recurse, importsAndAliases, seen) case 
*schema.EnumType: mod := pkg.tokenToPackage(t.Token) if mod != pkg.mod { @@ -1582,26 +2516,11 @@ func (pkg *pkgContext) getTypeImports(t schema.Type, recurse bool, importsAndAli case *schema.MapType: pkg.getTypeImports(t.ElementType, recurse, importsAndAliases, seen) case *schema.ObjectType: - if t.Package != nil && pkg.pkg != nil && t.Package != pkg.pkg { - extPkg := t.Package - var goInfo GoPackageInfo - - contract.AssertNoError(extPkg.ImportLanguages(map[string]schema.Language{"go": Importer})) - if info, ok := extPkg.Language["go"].(GoPackageInfo); ok { - goInfo = info - } else { - // tests don't include ImportBasePath - goInfo.ImportBasePath = extractImportBasePath(extPkg) - } - extPkgCtx := &pkgContext{ - pkg: extPkg, - importBasePath: goInfo.ImportBasePath, - pkgImportAliases: goInfo.PackageImportAliases, - modToPkg: goInfo.ModuleToPackage, - } + if pkg.isExternalReference(t) { + extPkgCtx := pkg.contextForExternalReference(t) mod := extPkgCtx.tokenToPackage(t.Token) - imp := path.Join(goInfo.ImportBasePath, mod) - importsAndAliases[imp] = goInfo.PackageImportAliases[imp] + imp := path.Join(extPkgCtx.importBasePath, mod) + importsAndAliases[imp] = extPkgCtx.pkgImportAliases[imp] break } mod := pkg.tokenToPackage(t.Token) @@ -1616,26 +2535,11 @@ func (pkg *pkgContext) getTypeImports(t schema.Type, recurse bool, importsAndAli } } case *schema.ResourceType: - if t.Resource != nil && pkg.pkg != nil && t.Resource.Package != pkg.pkg { - extPkg := t.Resource.Package - var goInfo GoPackageInfo - - contract.AssertNoError(extPkg.ImportLanguages(map[string]schema.Language{"go": Importer})) - if info, ok := extPkg.Language["go"].(GoPackageInfo); ok { - goInfo = info - } else { - // tests don't include ImportBasePath - goInfo.ImportBasePath = extractImportBasePath(extPkg) - } - extPkgCtx := &pkgContext{ - pkg: extPkg, - importBasePath: goInfo.ImportBasePath, - pkgImportAliases: goInfo.PackageImportAliases, - modToPkg: goInfo.ModuleToPackage, - } + if 
pkg.isExternalReference(t) { + extPkgCtx := pkg.contextForExternalReference(t) mod := extPkgCtx.tokenToPackage(t.Token) - imp := path.Join(goInfo.ImportBasePath, mod) - importsAndAliases[imp] = goInfo.PackageImportAliases[imp] + imp := path.Join(extPkgCtx.importBasePath, mod) + importsAndAliases[imp] = extPkgCtx.pkgImportAliases[imp] break } mod := pkg.tokenToPackage(t.Token) @@ -1673,10 +2577,25 @@ func (pkg *pkgContext) getImports(member interface{}, importsAndAliases map[stri for _, p := range member.InputProperties { pkg.getTypeImports(p.Type, false, importsAndAliases, seen) - if p.IsRequired { + if p.IsRequired() { importsAndAliases["github.com/pkg/errors"] = "" } } + for _, method := range member.Methods { + if method.Function.Inputs != nil { + for _, p := range method.Function.Inputs.InputShape.Properties { + if p.Name == "__self__" { + continue + } + pkg.getTypeImports(p.Type, false, importsAndAliases, seen) + } + } + if method.Function.Outputs != nil { + for _, p := range method.Function.Outputs.Properties { + pkg.getTypeImports(p.Type, false, importsAndAliases, seen) + } + } + } case *schema.Function: if member.Inputs != nil { pkg.getTypeImports(member.Inputs, true, importsAndAliases, seen) @@ -1692,8 +2611,6 @@ func (pkg *pkgContext) getImports(member interface{}, importsAndAliases map[stri default: return } - - importsAndAliases["github.com/pulumi/pulumi/sdk/v3/go/pulumi"] = "" } func (pkg *pkgContext) genHeader(w io.Writer, goImports []string, importsAndAliases map[string]string) { @@ -1702,10 +2619,7 @@ func (pkg *pkgContext) genHeader(w io.Writer, goImports []string, importsAndAlia var pkgName string if pkg.mod == "" { - pkgName = pkg.rootPackageName - if pkgName == "" { - pkgName = goPackage(pkg.pkg.Name) - } + pkgName = packageName(pkg.pkg) } else { pkgName = path.Base(pkg.mod) } @@ -1750,7 +2664,10 @@ func (pkg *pkgContext) genHeader(w io.Writer, goImports []string, importsAndAlia } func (pkg *pkgContext) genConfig(w io.Writer, variables 
[]*schema.Property) error { - importsAndAliases := map[string]string{"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config": ""} + importsAndAliases := map[string]string{ + "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config": "", + "github.com/pulumi/pulumi/sdk/v3/go/pulumi": "", + } pkg.getImports(variables, importsAndAliases) pkg.genHeader(w, nil, importsAndAliases) @@ -1760,7 +2677,7 @@ func (pkg *pkgContext) genConfig(w io.Writer, variables []*schema.Property) erro var getType string var funcType string - switch p.Type { + switch codegen.UnwrapType(p.Type) { case schema.BoolType: getType, funcType = "bool", "Bool" case schema.IntType: @@ -1776,7 +2693,7 @@ func (pkg *pkgContext) genConfig(w io.Writer, variables []*schema.Property) erro fmt.Fprintf(w, "func Get%s(ctx *pulumi.Context) %s {\n", Title(p.Name), getType) if p.DefaultValue != nil { - defaultValue, err := pkg.getDefaultValue(p.DefaultValue, p.Type) + defaultValue, err := pkg.getDefaultValue(p.DefaultValue, codegen.UnwrapType(p.Type)) if err != nil { return err } @@ -1801,22 +2718,32 @@ func (pkg *pkgContext) genConfig(w io.Writer, variables []*schema.Property) erro // definition and its registration to support rehydrating providers. func (pkg *pkgContext) genResourceModule(w io.Writer) { contract.Assert(len(pkg.resources) != 0) + allResourcesAreOverlays := true + for _, r := range pkg.resources { + if !r.IsOverlay { + allResourcesAreOverlays = false + break + } + } + if allResourcesAreOverlays { + // If all resources in this module are overlays, skip further code generation. + return + } basePath := pkg.importBasePath - // TODO: importBasePath isn't currently set for schemas generated by pulumi-terraform-bridge. - // Remove this once the linked issue is fixed. 
https://github.com/pulumi/pulumi-terraform-bridge/issues/320 - if len(basePath) == 0 { - basePath = fmt.Sprintf("github.com/pulumi/pulumi-%[1]s/sdk/v2/go/%[1]s", pkg.pkg.Name) - } - imports := map[string]string{ "github.com/blang/semver": "", "github.com/pulumi/pulumi/sdk/v3/go/pulumi": "", } + topLevelModule := pkg.mod == "" if !topLevelModule { - imports[basePath] = "" + if alias, ok := pkg.pkgImportAliases[basePath]; ok { + imports[basePath] = alias + } else { + imports[basePath] = "" + } } pkg.genHeader(w, []string{"fmt"}, imports) @@ -1837,6 +2764,10 @@ func (pkg *pkgContext) genResourceModule(w io.Writer) { fmt.Fprintf(w, "func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi.Resource, err error) {\n") fmt.Fprintf(w, "\tswitch typ {\n") for _, r := range pkg.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } if r.IsProvider { contract.Assert(provider == nil) provider = r @@ -1845,7 +2776,7 @@ func (pkg *pkgContext) genResourceModule(w io.Writer) { registrations.Add(tokenToModule(r.Token)) fmt.Fprintf(w, "\tcase %q:\n", r.Token) - fmt.Fprintf(w, "\t\tr = &%s{}\n", resourceName(r)) + fmt.Fprintf(w, "\t\tr = &%s{}\n", disambiguatedResourceName(r, pkg)) } fmt.Fprintf(w, "\tdefault:\n") fmt.Fprintf(w, "\t\treturn nil, fmt.Errorf(\"unknown resource type: %%s\", typ)\n") @@ -1878,8 +2809,15 @@ func (pkg *pkgContext) genResourceModule(w io.Writer) { if topLevelModule { fmt.Fprintf(w, "\tversion, err := PkgVersion()\n") } else { - // Some package names contain '-' characters, so grab the name from the base path. - pkgName := basePath[strings.LastIndex(basePath, "/")+1:] + // Some package names contain '-' characters, so grab the name from the base path, unless there is an alias + // in which case we use that instead. 
+ var pkgName string + if alias, ok := pkg.pkgImportAliases[basePath]; ok { + pkgName = alias + } else { + pkgName = basePath[strings.LastIndex(basePath, "/")+1:] + } + pkgName = strings.ReplaceAll(pkgName, "-", "") fmt.Fprintf(w, "\tversion, err := %s.PkgVersion()\n", pkgName) } fmt.Fprintf(w, "\tif err != nil {\n") @@ -1910,19 +2848,23 @@ func generatePackageContextMap(tool string, pkg *schema.Package, goInfo GoPackag pack, ok := packages[mod] if !ok { pack = &pkgContext{ - pkg: pkg, - mod: mod, - importBasePath: goInfo.ImportBasePath, - rootPackageName: goInfo.RootPackageName, - typeDetails: map[schema.Type]*typeDetails{}, - names: codegen.NewStringSet(), - schemaNames: codegen.NewStringSet(), - renamed: map[string]string{}, - functionNames: map[*schema.Function]string{}, - tool: tool, - modToPkg: goInfo.ModuleToPackage, - pkgImportAliases: goInfo.PackageImportAliases, - packages: packages, + pkg: pkg, + mod: mod, + importBasePath: goInfo.ImportBasePath, + rootPackageName: goInfo.RootPackageName, + typeDetails: map[schema.Type]*typeDetails{}, + names: codegen.NewStringSet(), + schemaNames: codegen.NewStringSet(), + renamed: map[string]string{}, + duplicateTokens: map[string]bool{}, + functionNames: map[*schema.Function]string{}, + tool: tool, + modToPkg: goInfo.ModuleToPackage, + pkgImportAliases: goInfo.PackageImportAliases, + packages: packages, + liftSingleValueMethodReturns: goInfo.LiftSingleValueMethodReturns, + disableInputTypeRegistrations: goInfo.DisableInputTypeRegistrations, + disableObjectDefaults: goInfo.DisableObjectDefaults, } packages[mod] = pack } @@ -1935,7 +2877,7 @@ func generatePackageContextMap(tool string, pkg *schema.Package, goInfo GoPackag var getPkgFromType func(schema.Type) *pkgContext getPkgFromType = func(typ schema.Type) *pkgContext { - switch t := typ.(type) { + switch t := codegen.UnwrapType(typ).(type) { case *schema.ArrayType: return getPkgFromType(t.ElementType) case *schema.MapType: @@ -1953,55 +2895,83 @@ func 
generatePackageContextMap(tool string, pkg *schema.Package, goInfo GoPackag // In addition, if the optional property's type is itself an object type, we also need to generate pointer // types corresponding to all of it's nested properties, as our accessor methods will lift `nil` into // those nested types. - var populateDetailsForPropertyTypes func(seen codegen.StringSet, props []*schema.Property, parentOptional bool) - var populateDetailsForTypes func(seen codegen.StringSet, schemaType schema.Type, isRequired bool, parentOptional bool) + var populateDetailsForPropertyTypes func(seen codegen.StringSet, props []*schema.Property, optional, input, output bool) + var populateDetailsForTypes func(seen codegen.StringSet, schemaType schema.Type, optional, input, output bool) + + seenKey := func(t schema.Type, optional, input, output bool) string { + var key string + switch t := t.(type) { + case *schema.ObjectType: + key = t.Token + case *schema.EnumType: + key = t.Token + default: + key = t.String() + } + if optional { + key += ",optional" + } + if input { + key += ",input" + } + if output { + key += ",output" + } + return key + } - populateDetailsForPropertyTypes = func(seen codegen.StringSet, props []*schema.Property, parentOptional bool) { + populateDetailsForPropertyTypes = func(seen codegen.StringSet, props []*schema.Property, optional, input, output bool) { for _, p := range props { - populateDetailsForTypes(seen, p.Type, p.IsRequired, parentOptional) + populateDetailsForTypes(seen, p.Type, !p.IsRequired() || optional, input, output) } } - populateDetailsForTypes = func(seen codegen.StringSet, schemaType schema.Type, isRequired bool, parentOptional bool) { + populateDetailsForTypes = func(seen codegen.StringSet, schemaType schema.Type, optional, input, output bool) { + key := seenKey(schemaType, optional, input, output) + if seen.Has(key) { + return + } + seen.Add(key) + switch typ := schemaType.(type) { + case *schema.InputType: + populateDetailsForTypes(seen, 
typ.ElementType, optional, true, false) + case *schema.OptionalType: + populateDetailsForTypes(seen, typ.ElementType, true, input, output) case *schema.ObjectType: pkg := getPkgFromToken(typ.Token) - if !isRequired || parentOptional { - if seen.Has(typ.Token) { - return - } - seen.Add(typ.Token) - pkg.detailsForType(typ).ptrElement = true - populateDetailsForPropertyTypes(seen, typ.Properties, true) + pkg.detailsForType(typ).mark(input || goInfo.GenerateExtraInputTypes, output) + + if optional { + pkg.detailsForType(typ).markPtr(input || goInfo.GenerateExtraInputTypes, output) } + pkg.schemaNames.Add(tokenToName(typ.Token)) + + populateDetailsForPropertyTypes(seen, typ.Properties, optional, input, output) case *schema.EnumType: - if seen.Has(typ.Token) { - return - } - seen.Add(typ.Token) pkg := getPkgFromToken(typ.Token) - if !isRequired || parentOptional { - pkg.detailsForType(typ).ptrElement = true + pkg.detailsForType(typ).mark(input || goInfo.GenerateExtraInputTypes, output) + + if optional { + pkg.detailsForType(typ).markPtr(input || goInfo.GenerateExtraInputTypes, output) } + pkg.schemaNames.Add(tokenToName(typ.Token)) case *schema.ArrayType: - if seen.Has(typ.String()) { - return - } - seen.Add(typ.String()) - getPkgFromType(typ.ElementType).detailsForType(typ.ElementType).arrayElement = true - populateDetailsForTypes(seen, typ.ElementType, true, false) + details := getPkgFromType(typ.ElementType).detailsForType(codegen.UnwrapType(typ.ElementType)) + details.markArray(input || goInfo.GenerateExtraInputTypes, output) + populateDetailsForTypes(seen, typ.ElementType, false, input, output) case *schema.MapType: - if seen.Has(typ.String()) { - return - } - seen.Add(typ.String()) - getPkgFromType(typ.ElementType).detailsForType(typ.ElementType).mapElement = true - populateDetailsForTypes(seen, typ.ElementType, true, false) + details := getPkgFromType(typ.ElementType).detailsForType(codegen.UnwrapType(typ.ElementType)) + details.markMap(input || 
goInfo.GenerateExtraInputTypes, output) + populateDetailsForTypes(seen, typ.ElementType, false, input, output) } } + // Rewrite cyclic types. See the docs on rewriteCyclicFields for the motivation. + rewriteCyclicObjectFields(pkg) + // Use a string set to track object types that have already been processed. // This avoids recursively processing the same type. For example, in the // Kubernetes package, JSONSchemaProps have properties whose type is itself. @@ -2009,38 +2979,108 @@ func generatePackageContextMap(tool string, pkg *schema.Package, goInfo GoPackag for _, t := range pkg.Types { switch typ := t.(type) { case *schema.ArrayType: - getPkgFromType(typ.ElementType).detailsForType(typ.ElementType).arrayElement = true + details := getPkgFromType(typ.ElementType).detailsForType(codegen.UnwrapType(typ.ElementType)) + details.markArray(goInfo.GenerateExtraInputTypes, false) case *schema.MapType: - getPkgFromType(typ.ElementType).detailsForType(typ.ElementType).mapElement = true + details := getPkgFromType(typ.ElementType).detailsForType(codegen.UnwrapType(typ.ElementType)) + details.markMap(goInfo.GenerateExtraInputTypes, false) case *schema.ObjectType: pkg := getPkgFromToken(typ.Token) - pkg.types = append(pkg.types, typ) - populateDetailsForPropertyTypes(seenMap, typ.Properties, false) + if !typ.IsInputShape() { + pkg.types = append(pkg.types, typ) + } + populateDetailsForTypes(seenMap, typ, false, false, false) case *schema.EnumType: - pkg := getPkgFromToken(typ.Token) - pkg.enums = append(pkg.enums, typ) + if !typ.IsOverlay { + pkg := getPkgFromToken(typ.Token) + pkg.enums = append(pkg.enums, typ) + + populateDetailsForTypes(seenMap, typ, false, false, false) + } } } + resSeen := map[string]bool{} + typeSeen := map[string]bool{} + + // compute set of names generated by a resource + // handling any potential collisions via remapping along the way scanResource := func(r *schema.Resource) { + if resSeen[strings.ToLower(r.Token)] { + return + } + 
resSeen[strings.ToLower(r.Token)] = true pkg := getPkgFromToken(r.Token) pkg.resources = append(pkg.resources, r) pkg.schemaNames.Add(tokenToName(r.Token)) - pkg.names.Add(resourceName(r)) - pkg.names.Add(resourceName(r) + "Input") - pkg.names.Add(resourceName(r) + "Output") - pkg.names.Add(resourceName(r) + "Args") - pkg.names.Add(camel(resourceName(r)) + "Args") - pkg.names.Add("New" + resourceName(r)) - if !r.IsProvider && !r.IsComponent { - pkg.names.Add(resourceName(r) + "State") - pkg.names.Add(camel(resourceName(r)) + "State") - pkg.names.Add("Get" + resourceName(r)) + getNames := func(suffix string) []string { + names := []string{} + names = append(names, rawResourceName(r)+suffix) + names = append(names, rawResourceName(r)+suffix+"Input") + names = append(names, rawResourceName(r)+suffix+"Output") + names = append(names, rawResourceName(r)+suffix+"Args") + names = append(names, camel(rawResourceName(r))+suffix+"Args") + names = append(names, "New"+rawResourceName(r)+suffix) + if !r.IsProvider && !r.IsComponent { + names = append(names, rawResourceName(r)+suffix+"State") + names = append(names, camel(rawResourceName(r))+suffix+"State") + names = append(names, "Get"+rawResourceName(r)+suffix) + } + return names + } + + suffixes := []string{"", "Resource", "Res"} + suffix := "" + suffixIndex := 0 + canGenerate := false + + for !canGenerate && suffixIndex <= len(suffixes) { + suffix = suffixes[suffixIndex] + candidates := getNames(suffix) + conflict := false + for _, c := range candidates { + if pkg.names.Has(c) { + conflict = true + } + } + if !conflict { + canGenerate = true + break + } + + suffixIndex++ + } + + if !canGenerate { + panic(fmt.Sprintf("unable to generate Go SDK, schema has unresolvable overlapping resource: %s", rawResourceName(r))) + } + + names := getNames(suffix) + originalNames := getNames("") + for i, n := range names { + pkg.names.Add(n) + if suffix != "" { + pkg.renamed[originalNames[i]] = names[i] + } + } + + 
populateDetailsForPropertyTypes(seenMap, r.InputProperties, r.IsProvider, false, false) + populateDetailsForPropertyTypes(seenMap, r.Properties, r.IsProvider, false, true) + + if r.StateInputs != nil { + populateDetailsForPropertyTypes(seenMap, r.StateInputs.Properties, + r.IsProvider, false /*input*/, false /*output*/) } - populateDetailsForPropertyTypes(seenMap, r.InputProperties, !r.IsProvider) - populateDetailsForPropertyTypes(seenMap, r.Properties, !r.IsProvider) + for _, method := range r.Methods { + if method.Function.Inputs != nil { + pkg.names.Add(rawResourceName(r) + Title(method.Name) + "Args") + } + if method.Function.Outputs != nil { + pkg.names.Add(rawResourceName(r) + Title(method.Name) + "Result") + } + } } scanResource(pkg.Provider) @@ -2048,12 +3088,144 @@ func generatePackageContextMap(tool string, pkg *schema.Package, goInfo GoPackag scanResource(r) } + // compute set of names generated by a type + // handling any potential collisions via remapping along the way + scanType := func(t schema.Type) { + getNames := func(name, suffix string) []string { + return []string{name + suffix, name + suffix + "Input", name + suffix + "Output"} + } + + switch t := t.(type) { + case *schema.ObjectType: + pkg := getPkgFromToken(t.Token) + // maintain support for duplicate tokens for types and resources in Kubernetes + if resSeen[strings.ToLower(t.Token)] { + pkg.duplicateTokens[strings.ToLower(t.Token)] = true + } + if typeSeen[strings.ToLower(t.Token)] { + return + } + typeSeen[strings.ToLower(t.Token)] = true + + name := pkg.tokenToType(t.Token) + suffixes := []string{"", "Type", "Typ"} + suffix := "" + suffixIndex := 0 + canGenerate := false + + for !canGenerate && suffixIndex <= len(suffixes) { + suffix = suffixes[suffixIndex] + candidates := getNames(name, suffix) + conflict := false + for _, c := range candidates { + if pkg.names.Has(c) { + conflict = true + } + } + if !conflict { + canGenerate = true + break + } + + suffixIndex++ + } + + if !canGenerate { 
+ panic(fmt.Sprintf("unable to generate Go SDK, schema has unresolvable overlapping type: %s", name)) + } + + names := getNames(name, suffix) + originalNames := getNames(name, "") + for i, n := range names { + pkg.names.Add(n) + if suffix != "" { + pkg.renamed[originalNames[i]] = names[i] + } + } + case *schema.EnumType: + pkg := getPkgFromToken(t.Token) + if resSeen[t.Token] { + pkg.duplicateTokens[strings.ToLower(t.Token)] = true + } + if typeSeen[t.Token] { + return + } + typeSeen[t.Token] = true + + name := pkg.tokenToEnum(t.Token) + suffixes := []string{"", "Enum"} + suffix := "" + suffixIndex := 0 + canGenerate := false + + for !canGenerate && suffixIndex <= len(suffixes) { + suffix = suffixes[suffixIndex] + candidates := getNames(name, suffix) + conflict := false + for _, c := range candidates { + if pkg.names.Has(c) { + conflict = true + } + } + if !conflict { + canGenerate = true + break + } + + suffixIndex++ + } + + if !canGenerate { + panic(fmt.Sprintf("unable to generate Go SDK, schema has unresolvable overlapping type: %s", name)) + } + + names := getNames(name, suffix) + originalNames := getNames(name, "") + for i, n := range names { + pkg.names.Add(n) + if suffix != "" { + pkg.renamed[originalNames[i]] = names[i] + } + } + default: + return + } + } + + for _, t := range pkg.Types { + scanType(t) + } + + // For fnApply function versions, we need to register any + // input or output property type metadata, in case they have + // types used in array or pointer element positions. 
+ if !goInfo.DisableFunctionOutputVersions || goInfo.GenerateExtraInputTypes { + for _, f := range pkg.Functions { + if f.NeedsOutputVersion() || goInfo.GenerateExtraInputTypes { + optional := false + if f.Inputs != nil { + populateDetailsForPropertyTypes(seenMap, f.Inputs.InputShape.Properties, optional, false, false) + } + if f.Outputs != nil { + populateDetailsForTypes(seenMap, f.Outputs, optional, false, true) + } + } + } + } + for _, f := range pkg.Functions { + if f.IsMethod { + continue + } + pkg := getPkgFromToken(f.Token) pkg.functions = append(pkg.functions, f) name := tokenToName(f.Token) - if pkg.names.Has(name) { + + if pkg.names.Has(name) || + pkg.names.Has(name+"Args") || + pkg.names.Has(name+"Result") { switch { case strings.HasPrefix(name, "New"): name = "Create" + name[3:] @@ -2113,6 +3285,11 @@ func LanguageResources(tool string, pkg *schema.Package) (map[string]LanguageRes pkg := packages[mod] for _, r := range pkg.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + packagePath := path.Join(goPkgInfo.ImportBasePath, pkg.mod) resources[r.Token] = LanguageResource{ Resource: r, @@ -2126,6 +3303,37 @@ func LanguageResources(tool string, pkg *schema.Package) (map[string]LanguageRes return resources, nil } +// packageRoot is the relative root file for go code. That means that every go +// source file should be under this root. For example: +// +// root = aws => sdk/go/aws/*.go +func packageRoot(pkg *schema.Package) string { + var info GoPackageInfo + if goInfo, ok := pkg.Language["go"].(GoPackageInfo); ok { + info = goInfo + } + if info.RootPackageName != "" { + // package structure is flat + return "" + } + if info.ImportBasePath != "" { + return path.Base(info.ImportBasePath) + } + return goPackage(pkg.Name) +} + +// packageName is the go package name for the generated package. 
+func packageName(pkg *schema.Package) string { + var info GoPackageInfo + if goInfo, ok := pkg.Language["go"].(GoPackageInfo); ok { + info = goInfo + } + if info.RootPackageName != "" { + return info.RootPackageName + } + return goPackage(packageRoot(pkg)) +} + func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error) { if err := pkg.ImportLanguages(map[string]schema.Language{"go": Importer}); err != nil { return nil, err @@ -2144,25 +3352,37 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error } sort.Strings(pkgMods) - name := goPkgInfo.RootPackageName - if name == "" { - name = goPackage(pkg.Name) - } + name := packageName(pkg) + pathPrefix := packageRoot(pkg) files := map[string][]byte{} + + // Generate pulumi-plugin.json + pulumiPlugin := &plugin.PulumiPluginJSON{ + Resource: true, + Name: pkg.Name, + Server: pkg.PluginDownloadURL, + } + if goPkgInfo.RespectSchemaVersion && pkg.Version != nil { + pulumiPlugin.Version = pkg.Version.String() + } + pulumiPluginJSON, err := pulumiPlugin.JSON() + if err != nil { + return nil, fmt.Errorf("Failed to format pulumi-plugin.json: %w", err) + } + files[path.Join(pathPrefix, "pulumi-plugin.json")] = pulumiPluginJSON + setFile := func(relPath, contents string) { - if goPkgInfo.RootPackageName == "" { - relPath = path.Join(goPackage(name), relPath) - } + relPath = path.Join(pathPrefix, relPath) if _, ok := files[relPath]; ok { - panic(errors.Errorf("duplicate file: %s", relPath)) + panic(fmt.Errorf("duplicate file: %s", relPath)) } // Run Go formatter on the code before saving to disk formattedSource, err := format.Source([]byte(contents)) if err != nil { fmt.Fprintf(os.Stderr, "Invalid content:\n%s\n%s\n", relPath, contents) - panic(errors.Wrapf(err, "invalid Go source code:\n\n%s\n", relPath)) + panic(fmt.Errorf("invalid Go source code:\n\n%s\n: %w", relPath, err)) } files[relPath] = formattedSource @@ -2199,8 +3419,14 @@ func GeneratePackage(tool string, pkg 
*schema.Package) (map[string][]byte, error // Resources for _, r := range pkg.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + importsAndAliases := map[string]string{} pkg.getImports(r, importsAndAliases) + importsAndAliases["github.com/pulumi/pulumi/sdk/v3/go/pulumi"] = "" buffer := &bytes.Buffer{} pkg.genHeader(buffer, []string{"context", "reflect"}, importsAndAliases) @@ -2209,20 +3435,22 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error return nil, err } - setFile(path.Join(mod, camel(resourceName(r))+".go"), buffer.String()) + setFile(path.Join(mod, camel(rawResourceName(r))+".go"), buffer.String()) } // Functions for _, f := range pkg.functions { - importsAndAliases := map[string]string{} - pkg.getImports(f, importsAndAliases) - - buffer := &bytes.Buffer{} - pkg.genHeader(buffer, nil, importsAndAliases) - - pkg.genFunction(buffer, f) + if f.IsOverlay { + // This function code is generated by the provider, so no further action is required. 
+ continue + } - setFile(path.Join(mod, camel(tokenToName(f.Token))+".go"), buffer.String()) + fileName := path.Join(mod, camel(tokenToName(f.Token))+".go") + code, err := pkg.genFunctionCodeFile(f) + if err != nil { + return nil, err + } + setFile(fileName, code) } knownTypes := make(map[schema.Type]struct{}, len(pkg.typeDetails)) @@ -2232,13 +3460,19 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error // Enums if len(pkg.enums) > 0 { - imports := map[string]string{} + hasOutputs, imports := false, map[string]string{} for _, e := range pkg.enums { pkg.getImports(e, imports) + hasOutputs = hasOutputs || pkg.detailsForType(e).hasOutputs() + } + var goImports []string + if hasOutputs { + goImports = []string{"context", "reflect"} + imports["github.com/pulumi/pulumi/sdk/v3/go/pulumi"] = "" } buffer := &bytes.Buffer{} - pkg.genHeader(buffer, []string{"context", "reflect"}, imports) + pkg.genHeader(buffer, goImports, imports) for _, e := range pkg.enums { if err := pkg.genEnum(buffer, e); err != nil { @@ -2246,21 +3480,30 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error } delete(knownTypes, e) } + pkg.genEnumRegistrations(buffer) setFile(path.Join(mod, "pulumiEnums.go"), buffer.String()) } // Types if len(pkg.types) > 0 { - importsAndAliases := map[string]string{} + hasOutputs, importsAndAliases := false, map[string]string{} for _, t := range pkg.types { pkg.getImports(t, importsAndAliases) + hasOutputs = hasOutputs || pkg.detailsForType(t).hasOutputs() + } + var goImports []string + if hasOutputs { + goImports = []string{"context", "reflect"} + importsAndAliases["github.com/pulumi/pulumi/sdk/v3/go/pulumi"] = "" } buffer := &bytes.Buffer{} - pkg.genHeader(buffer, []string{"context", "reflect"}, importsAndAliases) + pkg.genHeader(buffer, goImports, importsAndAliases) for _, t := range pkg.types { - pkg.genType(buffer, t) + if err := pkg.genType(buffer, t); err != nil { + return nil, err + } 
delete(knownTypes, t) } @@ -2272,14 +3515,16 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error return sortedKnownTypes[i].String() < sortedKnownTypes[j].String() }) - var types []string + collectionTypes := map[string]*nestedTypeInfo{} for _, t := range sortedKnownTypes { switch typ := t.(type) { case *schema.ArrayType, *schema.MapType: - types = pkg.genNestedCollectionType(buffer, typ) + pkg.collectNestedCollectionTypes(collectionTypes, typ) } } + types := pkg.genNestedCollectionTypes(buffer, collectionTypes) + pkg.genTypeRegistrations(buffer, pkg.types, types...) setFile(path.Join(mod, "pulumiTypes.go"), buffer.String()) @@ -2299,16 +3544,13 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error packageRegex = fmt.Sprintf("^%s(/v\\d+)?", pkg.importBasePath) } - _, err := fmt.Fprintf(buffer, utilitiesFile, packageRegex) - if err != nil { - return nil, err - } + pkg.GenUtilitiesFile(buffer, packageRegex) setFile(path.Join(mod, "pulumiUtilities.go"), buffer.String()) } // If there are resources in this module, register the module with the runtime. - if len(pkg.resources) != 0 { + if len(pkg.resources) != 0 && !allResourcesAreOverlays(pkg.resources) { buffer := &bytes.Buffer{} pkg.genResourceModule(buffer) @@ -2319,12 +3561,22 @@ func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error return files, nil } +func allResourcesAreOverlays(resources []*schema.Resource) bool { + for _, r := range resources { + if !r.IsOverlay { + return false + } + } + return true +} + // goPackage returns the suggested package name for the given string. 
func goPackage(name string) string { - return strings.Split(name, "-")[0] + return strings.ReplaceAll(name, "-", "") } -const utilitiesFile = ` +func (pkg *pkgContext) GenUtilitiesFile(w io.Writer, packageRegex string) { + const utilitiesFile = ` type envParser func(v string) interface{} func parseEnvBool(v string) interface{} { @@ -2385,4 +3637,51 @@ func PkgVersion() (semver.Version, error) { } return semver.Version{}, fmt.Errorf("failed to determine the package version from %%s", pkgPath) } + +// isZero is a null safe check for if a value is it's types zero value. +func isZero(v interface{}) bool { + if v == nil { + return true + } + return reflect.ValueOf(v).IsZero() +} +` + _, err := fmt.Fprintf(w, utilitiesFile, packageRegex) + contract.AssertNoError(err) + pkg.GenPkgDefaultOpts(w) +} + +func (pkg *pkgContext) GenPkgDefaultOpts(w io.Writer) { + url := pkg.pkg.PluginDownloadURL + if url == "" { + return + } + const template string = ` +// pkg%[1]sDefaultOpts provides package level defaults to pulumi.Option%[1]s. +func pkg%[1]sDefaultOpts(opts []pulumi.%[1]sOption) []pulumi.%[1]sOption { + defaults := []pulumi.%[1]sOption{%[2]s} + + return append(defaults, opts...) +} ` + pluginDownloadURL := fmt.Sprintf("pulumi.PluginDownloadURL(%q)", url) + for _, typ := range []string{"Resource", "Invoke"} { + _, err := fmt.Fprintf(w, template, typ, pluginDownloadURL) + contract.AssertNoError(err) + } +} + +// GenPkgDefaultsOptsCall generates a call to Pkg{TYPE}DefaultsOpts. +func (pkg *pkgContext) GenPkgDefaultsOptsCall(w io.Writer, invoke bool) { + // The `pkg%sDefaultOpts` call won't do anything, so we don't insert it. 
+ if pkg.pkg.PluginDownloadURL == "" { + return + } + pkg.needsUtils = true + typ := "Resource" + if invoke { + typ = "Invoke" + } + _, err := fmt.Fprintf(w, "\topts = pkg%sDefaultOpts(opts)\n", typ) + contract.AssertNoError(err) +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_crd2pulumi.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_crd2pulumi.go index 2d562d6..993ecfd 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_crd2pulumi.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_crd2pulumi.go @@ -2,8 +2,8 @@ package gen import ( "bytes" + "fmt" - "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" ) @@ -37,13 +37,15 @@ func CRDTypes(tool string, pkg *schema.Package) (map[string]*bytes.Buffer, error pkg.genHeader(buffer, []string{"context", "reflect"}, importsAndAliases) if err := pkg.genResource(buffer, r, goPkgInfo.GenerateResourceContainerTypes); err != nil { - return nil, errors.Wrapf(err, "generating resource %s", mod) + return nil, fmt.Errorf("generating resource %s: %w", mod, err) } } if len(pkg.types) > 0 { for _, t := range pkg.types { - pkg.genType(buffer, t) + if err := pkg.genType(buffer, t); err != nil { + return nil, err + } } pkg.genTypeRegistrations(buffer, pkg.types) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program.go index 862659f..cff81fd 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program.go @@ -6,14 +6,15 @@ import ( gofmt "go/format" "io" "strings" + "sync" "github.com/hashicorp/hcl/v2" - "github.com/pkg/errors" + "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" + 
"github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -21,10 +22,11 @@ import ( type generator struct { // The formatter to use when generating code. *format.Formatter - program *hcl2.Program + program *pcl.Program packages map[string]*schema.Package contexts map[string]map[string]*pkgContext diagnostics hcl.Diagnostics + spills *spills jsonTempSpiller *jsonSpiller ternaryTempSpiller *tempSpiller readDirTempSpiller *readDirSpiller @@ -36,9 +38,9 @@ type generator struct { configCreated bool } -func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, error) { +func GenerateProgram(program *pcl.Program) (map[string][]byte, hcl.Diagnostics, error) { // Linearize the nodes into an order appropriate for procedural code generation. - nodes := hcl2.Linearize(program) + nodes := pcl.Linearize(program) packages, contexts := map[string]*schema.Package{}, map[string]map[string]*pkgContext{} for _, pkg := range program.Packages() { @@ -49,6 +51,7 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, program: program, packages: packages, contexts: contexts, + spills: &spills{counts: map[string]int{}}, jsonTempSpiller: &jsonSpiller{}, ternaryTempSpiller: &tempSpiller{}, readDirTempSpiller: &readDirSpiller{}, @@ -65,7 +68,8 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, // but still have access to types provided by __convert intrinsics after lowering. 
pulumiImports := codegen.NewStringSet() stdImports := codegen.NewStringSet() - g.collectImports(program, stdImports, pulumiImports) + preambleHelperMethods := codegen.NewStringSet() + g.collectImports(program, stdImports, pulumiImports, preambleHelperMethods) var progPostamble bytes.Buffer for _, n := range nodes { @@ -83,13 +87,13 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, // present in resource declarations or invokes alone. Expressions are lowered when the program is generated // and this must happen first so we can access types via __convert intrinsics. var index bytes.Buffer - g.genPreamble(&index, program, stdImports, pulumiImports) + g.genPreamble(&index, program, stdImports, pulumiImports, preambleHelperMethods) index.Write(progPostamble.Bytes()) // Run Go formatter on the code before saving to disk formattedSource, err := gofmt.Source(index.Bytes()) if err != nil { - panic(errors.Errorf("invalid Go source code:\n\n%s", index.String())) + panic(fmt.Errorf("invalid Go source code:\n\n%s", index.String())) } files := map[string][]byte{ @@ -98,7 +102,13 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, return files, g.diagnostics, nil } +var packageContexts sync.Map + func getPackages(tool string, pkg *schema.Package) map[string]*pkgContext { + if v, ok := packageContexts.Load(pkg); ok { + return v.(map[string]*pkgContext) + } + if err := pkg.ImportLanguages(map[string]schema.Language{"go": Importer}); err != nil { return nil } @@ -107,10 +117,12 @@ func getPackages(tool string, pkg *schema.Package) map[string]*pkgContext { if goInfo, ok := pkg.Language["go"].(GoPackageInfo); ok { goPkgInfo = goInfo } - return generatePackageContextMap(tool, pkg, goPkgInfo) + v := generatePackageContextMap(tool, pkg, goPkgInfo) + packageContexts.Store(pkg, v) + return v } -func (g *generator) collectScopeRoots(n hcl2.Node) { +func (g *generator) collectScopeRoots(n pcl.Node) { diags := 
n.VisitExpressions(nil, func(n model.Expression) (model.Expression, hcl.Diagnostics) { if st, ok := n.(*model.ScopeTraversalExpression); ok { g.scopeTraversalRoots.Add(st.RootName) @@ -121,11 +133,12 @@ func (g *generator) collectScopeRoots(n hcl2.Node) { } // genPreamble generates package decl, imports, and opens the main func -func (g *generator) genPreamble(w io.Writer, program *hcl2.Program, stdImports, pulumiImports codegen.StringSet) { +func (g *generator) genPreamble(w io.Writer, program *pcl.Program, stdImports, pulumiImports, + preambleHelperMethods codegen.StringSet) { g.Fprint(w, "package main\n\n") g.Fprintf(w, "import (\n") - g.collectImports(program, stdImports, pulumiImports) + g.collectImports(program, stdImports, pulumiImports, preambleHelperMethods) for _, imp := range stdImports.SortedValues() { g.Fprintf(w, "\"%s\"\n", imp) } @@ -138,18 +151,67 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program, stdImports, } g.Fprintf(w, ")\n") + + // If we collected any helper methods that should be added, write them just before the main func + for _, preambleHelperMethodBody := range preambleHelperMethods.SortedValues() { + g.Fprintf(w, "%s\n\n", preambleHelperMethodBody) + } + g.Fprintf(w, "func main() {\n") g.Fprintf(w, "pulumi.Run(func(ctx *pulumi.Context) error {\n") } +func (g *generator) collectTypeImports(program *pcl.Program, t schema.Type, imports codegen.StringSet) { + var token string + switch t := t.(type) { + case *schema.InputType: + g.collectTypeImports(program, t.ElementType, imports) + return + case *schema.OptionalType: + g.collectTypeImports(program, t.ElementType, imports) + return + case *schema.ArrayType: + g.collectTypeImports(program, t.ElementType, imports) + return + case *schema.MapType: + g.collectTypeImports(program, t.ElementType, imports) + return + case *schema.UnionType: + for _, t := range t.ElementTypes { + g.collectTypeImports(program, t, imports) + } + return + case *schema.ObjectType: + token = t.Token 
+ case *schema.EnumType: + token = t.Token + case *schema.TokenType: + token = t.Token + case *schema.ResourceType: + token = t.Token + } + if token == "" { + return + } + + var tokenRange hcl.Range + pkg, mod, _, _ := pcl.DecomposeToken(token, tokenRange) + vPath, err := g.getVersionPath(program, pkg) + if err != nil { + panic(err) + } + imports.Add(g.getPulumiImport(pkg, vPath, mod)) +} + // collect Imports returns two sets of packages imported by the program, std lib packages and pulumi packages func (g *generator) collectImports( - program *hcl2.Program, + program *pcl.Program, stdImports, - pulumiImports codegen.StringSet) (codegen.StringSet, codegen.StringSet) { + pulumiImports, + preambleHelperMethods codegen.StringSet) (codegen.StringSet, codegen.StringSet, codegen.StringSet) { // Accumulate import statements for the various providers for _, n := range program.Nodes { - if r, isResource := n.(*hcl2.Resource); isResource { + if r, isResource := n.(*pcl.Resource); isResource { pkg, mod, name, _ := r.DecomposeToken() if pkg == "pulumi" && mod == "providers" { pkg = name @@ -162,17 +224,17 @@ func (g *generator) collectImports( pulumiImports.Add(g.getPulumiImport(pkg, vPath, mod)) } - if _, isConfigVar := n.(*hcl2.ConfigVariable); isConfigVar { + if _, isConfigVar := n.(*pcl.ConfigVariable); isConfigVar { pulumiImports.Add("\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"") } diags := n.VisitExpressions(nil, func(n model.Expression) (model.Expression, hcl.Diagnostics) { if call, ok := n.(*model.FunctionCallExpression); ok { - if call.Name == hcl2.Invoke { + if call.Name == pcl.Invoke { tokenArg := call.Args[0] token := tokenArg.(*model.TemplateExpression).Parts[0].(*model.LiteralValueExpression).Value.AsString() tokenRange := tokenArg.SyntaxNode().Range() - pkg, mod, _, diagnostics := hcl2.DecomposeToken(token, tokenRange) + pkg, mod, _, diagnostics := pcl.DecomposeToken(token, tokenRange) contract.Assert(len(diagnostics) == 0) @@ -181,30 +243,13 @@ func (g 
*generator) collectImports( panic(err) } pulumiImports.Add(g.getPulumiImport(pkg, vPath, mod)) - } else if call.Name == hcl2.IntrinsicConvert { - if schemaType, ok := hcl2.GetSchemaForType(call.Type()); ok { - switch schemaType := schemaType.(type) { - case *schema.ObjectType: - token := schemaType.Token - var tokenRange hcl.Range - pkg, mod, _, _ := hcl2.DecomposeToken(token, tokenRange) - vPath, err := g.getVersionPath(program, pkg) - if err != nil { - panic(err) - } - pulumiImports.Add(g.getPulumiImport(pkg, vPath, mod)) - case *schema.ArrayType: - token := schemaType.ElementType.(*schema.ObjectType).Token - var tokenRange hcl.Range - pkg, mod, _, _ := hcl2.DecomposeToken(token, tokenRange) - vPath, err := g.getVersionPath(program, pkg) - if err != nil { - panic(err) - } - pulumiImports.Add(g.getPulumiImport(pkg, vPath, mod)) - } + } else if call.Name == pcl.IntrinsicConvert { + g.collectConvertImports(program, call, pulumiImports) + } - } + // Checking to see if this function call deserves its own dedicated helper method in the preamble + if helperMethodBody, ok := getHelperMethodIfNeeded(call.Name); ok { + preambleHelperMethods.Add(helperMethodBody) } } return n, nil @@ -227,10 +272,34 @@ func (g *generator) collectImports( contract.Assert(len(diags) == 0) } - return stdImports, pulumiImports + return stdImports, pulumiImports, preambleHelperMethods } -func (g *generator) getVersionPath(program *hcl2.Program, pkg string) (string, error) { +func (g *generator) collectConvertImports( + program *pcl.Program, + call *model.FunctionCallExpression, + pulumiImports codegen.StringSet) { + if schemaType, ok := pcl.GetSchemaForType(call.Type()); ok { + // Sometimes code for a `__convert` call does not + // really use the import of the result type. In such + // cases it is important not to generate a + // non-compiling unused import. Detect some of these + // cases here. + // + // Fully solving this is deferred for later: + // TODO[pulumi/pulumi#8324]. 
+ if expr, ok := call.Args[0].(*model.TemplateExpression); ok { + if lit, ok := expr.Parts[0].(*model.LiteralValueExpression); ok && + model.StringType.AssignableFrom(lit.Type()) && + call.Type().AssignableFrom(lit.Type()) { + return + } + } + g.collectTypeImports(program, schemaType, pulumiImports) + } +} + +func (g *generator) getVersionPath(program *pcl.Program, pkg string) (string, error) { for _, p := range program.Packages() { if p.Name == pkg { if p.Version != nil && p.Version.Major > 1 { @@ -240,7 +309,7 @@ func (g *generator) getVersionPath(program *hcl2.Program, pkg string) (string, e } } - return "", errors.Errorf("could not find package version information for pkg: %s", pkg) + return "", fmt.Errorf("could not find package version information for pkg: %s", pkg) } @@ -276,8 +345,12 @@ func (g *generator) getPulumiImport(pkg, vPath, mod string) string { // All providers don't follow the sdk/go/ scheme. Allow ImportBasePath as // a means to override this assumption. - if info.ImportBasePath != "" && mod != "" { - imp = fmt.Sprintf("%s/%s", info.ImportBasePath, mod) + if info.ImportBasePath != "" { + if mod != "" { + imp = fmt.Sprintf("%s/%s", info.ImportBasePath, mod) + } else { + imp = info.ImportBasePath + } } if alias, ok := info.PackageImportAliases[imp]; ok { @@ -290,14 +363,14 @@ func (g *generator) getPulumiImport(pkg, vPath, mod string) string { if modSplit[0] == "" || modSplit[0] == "index" { imp = fmt.Sprintf("github.com/pulumi/pulumi-%s/sdk%s/go/%s", pkg, vPath, pkg) } else { - imp = fmt.Sprintf("github.com/pulumi/pulumi-%s/sdk%s/go/%s/%s", pkg, vPath, pkg, strings.Split(mod, "/")[0]) + imp = fmt.Sprintf("github.com/pulumi/pulumi-%s/sdk%s/go/%s/%s", pkg, vPath, pkg, modSplit[0]) } } return fmt.Sprintf("%q", imp) } // genPostamble closes the method -func (g *generator) genPostamble(w io.Writer, nodes []hcl2.Node) { +func (g *generator) genPostamble(w io.Writer, nodes []pcl.Node) { g.Fprint(w, "return nil\n") g.Fprintf(w, "})\n") @@ -312,22 +385,22 
@@ func (g *generator) genHelpers(w io.Writer) { } } -func (g *generator) genNode(w io.Writer, n hcl2.Node) { +func (g *generator) genNode(w io.Writer, n pcl.Node) { switch n := n.(type) { - case *hcl2.Resource: + case *pcl.Resource: g.genResource(w, n) - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.genOutputAssignment(w, n) - case *hcl2.ConfigVariable: + case *pcl.ConfigVariable: g.genConfigVariable(w, n) - case *hcl2.LocalVariable: + case *pcl.LocalVariable: g.genLocalVariable(w, n) } } var resourceType = model.MustNewOpaqueType("pulumi.Resource") -func (g *generator) lowerResourceOptions(opts *hcl2.ResourceOptions) (*model.Block, []interface{}) { +func (g *generator) lowerResourceOptions(opts *pcl.ResourceOptions) (*model.Block, []interface{}) { if opts == nil { return nil, nil } @@ -342,7 +415,7 @@ func (g *generator) lowerResourceOptions(opts *hcl2.ResourceOptions) (*model.Blo } } - value, valueTemps := g.lowerExpression(value, destType, false) + value, valueTemps := g.lowerExpression(value, destType) temps = append(temps, valueTemps...) block.Body.Items = append(block.Body.Items, &model.Attribute{ @@ -382,9 +455,9 @@ func (g *generator) genResourceOptions(w io.Writer, block *model.Block) { } } -func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { +func (g *generator) genResource(w io.Writer, r *pcl.Resource) { - resName := makeValidIdentifier(r.Name()) + resName, resNameVar := r.Name(), makeValidIdentifier(r.Name()) pkg, mod, typ, _ := r.DecomposeToken() if mod == "" || strings.HasPrefix(mod, "/") || strings.HasPrefix(mod, "index/") { mod = pkg @@ -398,8 +471,7 @@ func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { for _, input := range r.Inputs { destType, diagnostics := r.InputType.Traverse(hcl.TraverseAttr{Name: input.Name}) g.diagnostics = append(g.diagnostics, diagnostics...) 
- isInput := true - expr, temps := g.lowerExpression(input.Value, destType.(model.Type), isInput) + expr, temps := g.lowerExpression(input.Value, destType.(model.Type)) input.Value = expr g.genTemps(w, temps) } @@ -438,10 +510,10 @@ func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { if r.Options != nil && r.Options.Range != nil { rangeType := model.ResolveOutputs(r.Options.Range.Type()) - rangeExpr, temps := g.lowerExpression(r.Options.Range, rangeType, false) + rangeExpr, temps := g.lowerExpression(r.Options.Range, rangeType) g.genTemps(w, temps) - g.Fgenf(w, "var %s []*%s.%s\n", resName, modOrAlias, typ) + g.Fgenf(w, "var %s []*%s.%s\n", resNameVar, modOrAlias, typ) // ahead of range statement declaration generate the resource instantiation // to detect and removed unused k,v variables @@ -456,18 +528,17 @@ func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { g.Fgenf(w, "for key0, %s := range %.v {\n", valVar, rangeExpr) g.Fgen(w, instantiation) - g.Fgenf(w, "%s = append(%s, __res)\n", resName, resName) + g.Fgenf(w, "%[1]s = append(%[1]s, __res)\n", resNameVar) g.Fgenf(w, "}\n") } else { - instantiate(resName, fmt.Sprintf("%q", resName), w) + instantiate(resNameVar, fmt.Sprintf("%q", resName), w) } } -func (g *generator) genOutputAssignment(w io.Writer, v *hcl2.OutputVariable) { - isInput := false - expr, temps := g.lowerExpression(v.Value, v.Type(), isInput) +func (g *generator) genOutputAssignment(w io.Writer, v *pcl.OutputVariable) { + expr, temps := g.lowerExpression(v.Value, v.Type()) g.genTemps(w, temps) g.Fgenf(w, "ctx.Export(\"%s\", %.3v)\n", v.Name(), expr) } @@ -482,7 +553,7 @@ func (g *generator) genTempsMultiReturn(w io.Writer, temps []interface{}, zeroVa if zeroValueType != "" { for _, t := range temps { switch t.(type) { - case *jsonTemp, *readDirTemp: + case *spillTemp, *jsonTemp, *readDirTemp: genZeroValueDecl = true default: } @@ -506,10 +577,10 @@ func (g *generator) genTempsMultiReturn(w io.Writer, temps []interface{}, 
zeroVa g.Fgenf(w, "} else {\n") g.Fgenf(w, "%s = %.v\n", t.Name, t.Value.FalseResult) g.Fgenf(w, "}\n") - case *jsonTemp: - bytesVar := fmt.Sprintf("tmp%s", strings.ToUpper(t.Name)) + case *spillTemp: + bytesVar := fmt.Sprintf("tmp%s", strings.ToUpper(t.Variable.Name)) g.Fgenf(w, "%s, err := json.Marshal(", bytesVar) - args := stripInputs(t.Value.Args[0]) + args := t.Value.(*model.FunctionCallExpression).Args[0] g.Fgenf(w, "%.v)\n", args) g.Fgenf(w, "if err != nil {\n") if genZeroValueDecl { @@ -518,7 +589,7 @@ func (g *generator) genTempsMultiReturn(w io.Writer, temps []interface{}, zeroVa g.Fgenf(w, "return err\n") } g.Fgenf(w, "}\n") - g.Fgenf(w, "%s := string(%s)\n", t.Name, bytesVar) + g.Fgenf(w, "%s := string(%s)\n", t.Variable.Name, bytesVar) case *readDirTemp: tmpSuffix := strings.Split(t.Name, "files")[1] g.Fgenf(w, "%s, err := ioutil.ReadDir(%.v)\n", t.Name, t.Value.Args[0]) @@ -554,9 +625,8 @@ func (g *generator) genTempsMultiReturn(w io.Writer, temps []interface{}, zeroVa } } -func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { - isInput := false - expr, temps := g.lowerExpression(v.Definition.Value, v.Type(), isInput) +func (g *generator) genLocalVariable(w io.Writer, v *pcl.LocalVariable) { + expr, temps := g.lowerExpression(v.Definition.Value, v.Type()) g.genTemps(w, temps) name := makeValidIdentifier(v.Name()) assignment := ":=" @@ -569,12 +639,20 @@ func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { switch expr := expr.(type) { case *model.FunctionCallExpression: switch expr.Name { - case hcl2.Invoke: - g.Fgenf(w, "%s, err %s %.3v;\n", name, assignment, expr) - g.isErrAssigned = true - g.Fgenf(w, "if err != nil {\n") - g.Fgenf(w, "return err\n") - g.Fgenf(w, "}\n") + case pcl.Invoke: + // OutputVersionedInvoke does not return an error + noError, _, _ := pcl.RecognizeOutputVersionedInvoke(expr) + if noError { + g.Fgenf(w, "%s %s %.3v;\n", name, assignment, expr) + } else { + g.Fgenf(w, "%s, err %s 
%.3v;\n", name, assignment, expr) + g.isErrAssigned = true + g.Fgenf(w, "if err != nil {\n") + g.Fgenf(w, "return err\n") + g.Fgenf(w, "}\n") + } + case "join", "toBase64", "mimeType", "fileAsset": + g.Fgenf(w, "%s := %.3v;\n", name, expr) } default: g.Fgenf(w, "%s := %.3v;\n", name, expr) @@ -582,7 +660,7 @@ func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { } } -func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { +func (g *generator) genConfigVariable(w io.Writer, v *pcl.ConfigVariable) { if !g.configCreated { g.Fprint(w, "cfg := config.New(ctx, \"\")\n") g.configCreated = true @@ -609,12 +687,12 @@ func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { if v.DefaultValue == nil { g.Fgenf(w, "%[1]s := cfg.%[2]s%[3]s(\"%[1]s\")\n", v.Name(), getOrRequire, getType) } else { - expr, temps := g.lowerExpression(v.DefaultValue, v.DefaultValue.Type(), false) + expr, temps := g.lowerExpression(v.DefaultValue, v.DefaultValue.Type()) g.genTemps(w, temps) switch expr := expr.(type) { case *model.FunctionCallExpression: switch expr.Name { - case hcl2.Invoke: + case pcl.Invoke: g.Fgenf(w, "%s, err := %.3v;\n", v.Name(), expr) g.isErrAssigned = true g.Fgenf(w, "if err != nil {\n") @@ -649,7 +727,7 @@ func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { // LookupVPC function: https://github.com/pulumi/pulumi-aws/blob/7835df354694e2f9f23371602a9febebc6b45be8/sdk/go/aws/ec2/getVpc.go#L15 // Given that the naming here is not consisten, we must reverse the process from gen.go. 
func (g *generator) useLookupInvokeForm(token string) bool { - pkg, module, member, _ := hcl2.DecomposeToken(token, *new(hcl.Range)) + pkg, module, member, _ := pcl.DecomposeToken(token, *new(hcl.Range)) modSplit := strings.Split(module, "/") mod := modSplit[0] fn := Title(member) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_expressions.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_expressions.go index b0b51f3..02e226d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_expressions.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_expressions.go @@ -5,13 +5,13 @@ import ( "fmt" "io" "math/big" - "reflect" "strings" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" + "github.com/pulumi/pulumi/pkg/v3/codegen" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" @@ -64,32 +64,37 @@ func (g *generator) GetPrecedence(expr model.Expression) int { // GenAnonymousFunctionExpression generates code for an AnonymousFunctionExpression. 
func (g *generator) GenAnonymousFunctionExpression(w io.Writer, expr *model.AnonymousFunctionExpression) { - g.genAnonymousFunctionExpression(w, expr, nil) + g.genAnonymousFunctionExpression(w, expr, nil, false) } func (g *generator) genAnonymousFunctionExpression( w io.Writer, expr *model.AnonymousFunctionExpression, bodyPreamble []string, + inApply bool, ) { g.Fgenf(w, "func(") leadingSep := "" for _, param := range expr.Signature.Parameters { isInput := isInputty(param.Type) - g.Fgenf(w, "%s%s %s", leadingSep, param.Name, g.argumentTypeName(nil, param.Type, isInput)) + g.Fgenf(w, "%s%s %s", leadingSep, makeValidIdentifier(param.Name), g.argumentTypeName(nil, param.Type, isInput)) leadingSep = ", " } - isInput := isInputty(expr.Signature.ReturnType) - retType := g.argumentTypeName(nil, expr.Signature.ReturnType, isInput) - g.Fgenf(w, ") (%s, error) {\n", retType) + retType := expr.Signature.ReturnType + if inApply { + retType = model.ResolveOutputs(retType) + } + + retTypeName := g.argumentTypeName(nil, retType, false) + g.Fgenf(w, ") (%s, error) {\n", retTypeName) for _, decl := range bodyPreamble { g.Fgenf(w, "%s\n", decl) } - body, temps := g.lowerExpression(expr.Body, expr.Signature.ReturnType, isInput) - g.genTempsMultiReturn(w, temps, retType) + body, temps := g.lowerExpression(expr.Body, retType) + g.genTempsMultiReturn(w, temps, retTypeName) g.Fgenf(w, "return %v, nil", body) g.Fgenf(w, "\n}") @@ -141,31 +146,9 @@ func (g *generator) GenConditionalExpression(w io.Writer, expr *model.Conditiona func (g *generator) GenForExpression(w io.Writer, expr *model.ForExpression) { /*TODO*/ } func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionCallExpression) { + //nolint:goconst switch expr.Name { - case hcl2.IntrinsicInput: - isInput := true - // bypass passthrough __convert expressions that might require prefix: "pulumi.*" - if c, ok := expr.Args[0].(*model.FunctionCallExpression); ok && c.Name == hcl2.IntrinsicConvert { - switch c := 
c.Args[0].(type) { - case *model.RelativeTraversalExpression, *model.ScopeTraversalExpression: - expr.Args[0] = c - g.GenFunctionCallExpression(w, expr) - return - } - } - switch arg := expr.Args[0].(type) { - case *model.RelativeTraversalExpression: - g.genRelativeTraversalExpression(w, arg, isInput) - case *model.ScopeTraversalExpression: - g.genScopeTraversalExpression(w, arg, isInput) - case *model.ObjectConsExpression: - g.genObjectConsExpression(w, arg, expr.Type(), isInput) - default: - argType := g.argumentTypeName(arg, arg.Type(), isInput) - g.Fgenf(w, "%s(%v", argType, arg) - g.Fgenf(w, ")") - } - case hcl2.IntrinsicConvert: + case pcl.IntrinsicConvert: switch arg := expr.Args[0].(type) { case *model.TupleConsExpression: g.genTupleConsExpression(w, arg, expr.Type()) @@ -174,10 +157,14 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.genObjectConsExpression(w, arg, expr.Type(), isInput) case *model.LiteralValueExpression: g.genLiteralValueExpression(w, arg, expr.Type()) + case *model.TemplateExpression: + g.genTemplateExpression(w, arg, expr.Type()) + case *model.ScopeTraversalExpression: + g.genScopeTraversalExpression(w, arg, expr.Type()) default: - g.Fgenf(w, "%.v", expr.Args[0]) // <- probably wrong w.r.t. 
precedence + g.Fgenf(w, "%.v", expr.Args[0]) } - case hcl2.IntrinsicApply: + case pcl.IntrinsicApply: g.genApply(w, expr) case "element": g.genNYI(w, "element") @@ -195,17 +182,33 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC // } // g.Fgenf(w, " => new { Key = k, Value = v })") case "fileArchive": - g.genNYI(w, "call %v", expr.Name) - // g.Fgenf(w, "new FileArchive(%.v)", expr.Args[0]) + g.Fgenf(w, "pulumi.NewFileArchive(%.v)", expr.Args[0]) case "fileAsset": g.Fgenf(w, "pulumi.NewFileAsset(%.v)", expr.Args[0]) - case hcl2.Invoke: + case "filebase64": + // Assuming the existence of the following helper method + g.Fgenf(w, "filebase64OrPanic(%v)", expr.Args[0]) + case "filebase64sha256": + // Assuming the existence of the following helper method + g.Fgenf(w, "filebase64sha256OrPanic(%v)", expr.Args[0]) + case pcl.Invoke: pkg, module, fn, diags := g.functionName(expr.Args[0]) contract.Assert(len(diags) == 0) if module == "" { module = pkg } - name := fmt.Sprintf("%s.%s", module, fn) + isOut, outArgs, outArgsType := pcl.RecognizeOutputVersionedInvoke(expr) + if isOut { + outTypeName, err := outputVersionFunctionArgTypeName(outArgsType) + if err != nil { + panic(fmt.Errorf("Error when generating an output-versioned Invoke: %w", err)) + } + g.Fgenf(w, "%s.%sOutput(ctx, ", module, fn) + g.genObjectConsExpressionWithTypeName(w, outArgs, outArgsType, outTypeName) + } else { + g.Fgenf(w, "%s.%s(ctx, ", module, fn) + g.Fgenf(w, "%.v", expr.Args[1]) + } optionsBag := "" var buf bytes.Buffer @@ -215,10 +218,9 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(&buf, ", nil") } optionsBag = buf.String() - - g.Fgenf(w, "%s(ctx, ", name) - g.Fgenf(w, "%.v", expr.Args[1]) g.Fgenf(w, "%v)", optionsBag) + case "join": + g.Fgenf(w, "strings.Join(%v, %v)", expr.Args[1], expr.Args[0]) case "length": g.genNYI(w, "call %v", expr.Name) // g.Fgenf(w, "%.20v.Length", expr.Args[0]) @@ -228,9 +230,10 @@ func (g 
*generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.genNYI(w, "call %v", expr.Name) // g.genRange(w, expr, false) case "readFile": - g.genNYI(w, "ReadFile") + // Assuming the existence of the following helper method located earlier in the preamble + g.Fgenf(w, "readFileOrPanic(%v)", expr.Args[0]) case "readDir": - contract.Failf("unlowered toJSON function expression @ %v", expr.SyntaxNode().Range()) + contract.Failf("unlowered readDir function expression @ %v", expr.SyntaxNode().Range()) case "secret": outputTypeName := "pulumi.Any" if model.ResolveOutputs(expr.Type()) != model.DynamicType { @@ -240,15 +243,53 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC case "split": g.genNYI(w, "call %v", expr.Name) // g.Fgenf(w, "%.20v.Split(%v)", expr.Args[1], expr.Args[0]) + case "toBase64": + g.Fgenf(w, "base64.StdEncoding.EncodeToString([]byte(%v))", expr.Args[0]) case "toJSON": contract.Failf("unlowered toJSON function expression @ %v", expr.SyntaxNode().Range()) case "mimeType": g.Fgenf(w, "mime.TypeByExtension(path.Ext(%.v))", expr.Args[0]) + case "sha1": + g.Fgenf(w, "sha1Hash(%v)", expr.Args[0]) + case "goOptionalFloat64": + g.Fgenf(w, "pulumi.Float64Ref(%.v)", expr.Args[0]) + case "goOptionalBool": + g.Fgenf(w, "pulumi.BoolRef(%.v)", expr.Args[0]) + case "goOptionalInt": + g.Fgenf(w, "pulumi.IntRef(%.v)", expr.Args[0]) + case "goOptionalString": + g.Fgenf(w, "pulumi.StringRef(%.v)", expr.Args[0]) default: g.genNYI(w, "call %v", expr.Name) } } +// Currently args type for output-versioned invokes are named +// `FOutputArgs`, but this is not yet understood by `tokenToType`. Use +// this function to compensate. 
+func outputVersionFunctionArgTypeName(t model.Type) (string, error) { + schemaType, ok := pcl.GetSchemaForType(t) + if !ok { + return "", fmt.Errorf("No schema.Type type found for the given model.Type") + } + + objType, ok := schemaType.(*schema.ObjectType) + if !ok { + return "", fmt.Errorf("Expected a schema.ObjectType, got %s", schemaType.String()) + } + + pkg := &pkgContext{pkg: &schema.Package{Name: "main"}} + + var ty string + if pkg.isExternalReference(objType) { + ty = pkg.contextForExternalReference(objType).tokenToType(objType.Token) + } else { + ty = pkg.tokenToType(objType.Token) + } + + return fmt.Sprintf("%sOutputArgs", strings.TrimSuffix(ty, "Args")), nil +} + func (g *generator) GenIndexExpression(w io.Writer, expr *model.IndexExpression) { g.Fgenf(w, "%.20v[%.v]", expr.Collection, expr.Key) } @@ -258,7 +299,12 @@ func (g *generator) GenLiteralValueExpression(w io.Writer, expr *model.LiteralVa } func (g *generator) genLiteralValueExpression(w io.Writer, expr *model.LiteralValueExpression, destType model.Type) { - if destType == model.NoneType { + exprType := expr.Type() + if cns, ok := exprType.(*model.ConstType); ok { + exprType = cns.Type + } + + if exprType == model.NoneType { g.Fgen(w, "nil") return } @@ -266,61 +312,41 @@ func (g *generator) genLiteralValueExpression(w io.Writer, expr *model.LiteralVa argTypeName := g.argumentTypeName(expr, destType, false) isPulumiType := strings.HasPrefix(argTypeName, "pulumi.") - switch destType := destType.(type) { - case *model.OpaqueType: - switch destType { - case model.BoolType: + switch exprType { + case model.BoolType: + if isPulumiType { + g.Fgenf(w, "%s(%v)", argTypeName, expr.Value.True()) + } else { + g.Fgenf(w, "%v", expr.Value.True()) + } + case model.NumberType, model.IntType: + bf := expr.Value.AsBigFloat() + if i, acc := bf.Int64(); acc == big.Exact { if isPulumiType { - g.Fgenf(w, "%s(%v)", argTypeName, expr.Value.True()) + g.Fgenf(w, "%s(%d)", argTypeName, i) } else { - g.Fgenf(w, "%v", 
expr.Value.True()) + g.Fgenf(w, "%d", i) } - case model.NumberType, model.IntType: - bf := expr.Value.AsBigFloat() - if i, acc := bf.Int64(); acc == big.Exact { - if isPulumiType { - g.Fgenf(w, "%s(%d)", argTypeName, i) - } else { - g.Fgenf(w, "%d", i) - } - } else { - f, _ := bf.Float64() - if isPulumiType { - g.Fgenf(w, "%s(%g)", argTypeName, f) - } else { - g.Fgenf(w, "%g", f) - } - } - case model.StringType: - strVal := expr.Value.AsString() + } else { + f, _ := bf.Float64() if isPulumiType { - g.Fgenf(w, "%s(", argTypeName) - g.genStringLiteral(w, strVal) - g.Fgenf(w, ")") + g.Fgenf(w, "%s(%g)", argTypeName, f) } else { - g.genStringLiteral(w, strVal) + g.Fgenf(w, "%g", f) } - default: - contract.Failf("unexpected opaque type in GenLiteralValueExpression: %v (%v)", destType, - expr.SyntaxNode().Range()) } - // handles the __convert intrinsic assuming that the union type will have an opaque type containing the dest type - case *model.UnionType: - var didGenerate bool - for _, t := range destType.ElementTypes { - if didGenerate { - break - } - switch t := t.(type) { - case *model.OpaqueType: - g.genLiteralValueExpression(w, expr, t) - didGenerate = true - break - } + case model.StringType: + strVal := expr.Value.AsString() + if isPulumiType { + g.Fgenf(w, "%s(", argTypeName) + g.genStringLiteral(w, strVal) + g.Fgenf(w, ")") + } else { + g.genStringLiteral(w, strVal) } default: - contract.Failf("unexpected destType in GenLiteralValueExpression: %v (%v)", destType, + contract.Failf("unexpected opaque type in GenLiteralValueExpression: %v (%v)", destType, expr.SyntaxNode().Range()) } } @@ -334,69 +360,91 @@ func (g *generator) genObjectConsExpression( w io.Writer, expr *model.ObjectConsExpression, destType model.Type, - isInput bool, -) { - if len(expr.Items) > 0 { - var temps []interface{} - isInput = isInput || isInputty(destType) - typeName := g.argumentTypeName(expr, destType, isInput) - if strings.HasSuffix(typeName, "Args") { - isInput = true - } - // invokes 
are not inputty - if strings.Contains(typeName, ".Lookup") || strings.Contains(typeName, ".Get") { - isInput = false - } - isMap := strings.HasPrefix(typeName, "map[") - - // TODO: retrieve schema and propagate optionals to emit bool ptr, etc. - - // first lower all inner expressions and emit temps - for i, item := range expr.Items { - // don't treat keys as inputs - k, kTemps := g.lowerExpression(item.Key, item.Key.Type(), false) - temps = append(temps, kTemps...) - item.Key = k - x, xTemps := g.lowerExpression(item.Value, item.Value.Type(), isInput) - temps = append(temps, xTemps...) - item.Value = x - expr.Items[i] = item - } - g.genTemps(w, temps) - - if isMap || !strings.HasSuffix(typeName, "Args") { - g.Fgenf(w, "%s", typeName) - } else { - g.Fgenf(w, "&%s", typeName) + isInput bool) { + + isInput = isInput || isInputty(destType) + + typeName := g.argumentTypeName(expr, destType, isInput) + if schemaType, ok := pcl.GetSchemaForType(destType); ok { + if obj, ok := codegen.UnwrapType(schemaType).(*schema.ObjectType); ok { + if g.useLookupInvokeForm(obj.Token) { + typeName = strings.Replace(typeName, ".Get", ".Lookup", 1) + } } - g.Fgenf(w, "{\n") + } - for _, item := range expr.Items { - if lit, ok := g.literalKey(item.Key); ok { - if isMap || strings.HasSuffix(typeName, "Map") { - g.Fgenf(w, "\"%s\"", lit) - } else { - g.Fgenf(w, "%s", Title(lit)) - } + g.genObjectConsExpressionWithTypeName(w, expr, destType, typeName) +} + +func (g *generator) genObjectConsExpressionWithTypeName( + w io.Writer, + expr *model.ObjectConsExpression, + destType model.Type, + typeName string) { + + if len(expr.Items) == 0 { + g.Fgenf(w, "nil") + return + } + + var temps []interface{} + // TODO: @pgavlin --- ineffectual assignment, was there some work in flight here? 
+ // if strings.HasSuffix(typeName, "Args") { + // isInput = true + // } + // // invokes are not inputty + // if strings.Contains(typeName, ".Lookup") || strings.Contains(typeName, ".Get") { + // isInput = false + // } + isMap := strings.HasPrefix(typeName, "map[") + + // TODO: retrieve schema and propagate optionals to emit bool ptr, etc. + + // first lower all inner expressions and emit temps + for i, item := range expr.Items { + // don't treat keys as inputs + //nolint: revive + k, kTemps := g.lowerExpression(item.Key, item.Key.Type()) + temps = append(temps, kTemps...) + item.Key = k + x, xTemps := g.lowerExpression(item.Value, item.Value.Type()) + temps = append(temps, xTemps...) + item.Value = x + expr.Items[i] = item + } + g.genTemps(w, temps) + + if isMap || !strings.HasSuffix(typeName, "Args") || strings.HasSuffix(typeName, "OutputArgs") { + g.Fgenf(w, "%s", typeName) + } else { + g.Fgenf(w, "&%s", typeName) + } + g.Fgenf(w, "{\n") + + for _, item := range expr.Items { + if lit, ok := g.literalKey(item.Key); ok { + if isMap || strings.HasSuffix(typeName, "Map") { + g.Fgenf(w, "\"%s\"", lit) } else { - g.Fgenf(w, "%.v", item.Key) + g.Fgenf(w, "%s", Title(lit)) } - - g.Fgenf(w, ": %.v,\n", item.Value) + } else { + g.Fgenf(w, "%.v", item.Key) } - g.Fgenf(w, "}") - } else { - g.Fgenf(w, "nil") + g.Fgenf(w, ": %.v,\n", item.Value) } + + g.Fgenf(w, "}") } -func (g *generator) genRelativeTraversalExpression(w io.Writer, expr *model.RelativeTraversalExpression, isInput bool) { +func (g *generator) genRelativeTraversalExpression( + w io.Writer, expr *model.RelativeTraversalExpression, isInput bool) { if _, ok := expr.Parts[0].(*model.PromiseType); ok { isInput = false } - if _, ok := expr.Parts[0].(*hcl2.Resource); ok { + if _, ok := expr.Parts[0].(*pcl.Resource); ok { isInput = false } if isInput { @@ -413,7 +461,7 @@ func (g *generator) GenRelativeTraversalExpression(w io.Writer, expr *model.Rela isRootResource := false if ie, ok := 
expr.Source.(*model.IndexExpression); ok { if se, ok := ie.Collection.(*model.ScopeTraversalExpression); ok { - if _, ok := se.Parts[0].(*hcl2.Resource); ok { + if _, ok := se.Parts[0].(*pcl.Resource); ok { isRootResource = true } } @@ -422,11 +470,11 @@ func (g *generator) GenRelativeTraversalExpression(w io.Writer, expr *model.Rela } func (g *generator) GenScopeTraversalExpression(w io.Writer, expr *model.ScopeTraversalExpression) { - isInput := false - g.genScopeTraversalExpression(w, expr, isInput) + g.genScopeTraversalExpression(w, expr, expr.Type()) } -func (g *generator) genScopeTraversalExpression(w io.Writer, expr *model.ScopeTraversalExpression, isInput bool) { +func (g *generator) genScopeTraversalExpression( + w io.Writer, expr *model.ScopeTraversalExpression, destType model.Type) { rootName := expr.RootName if _, ok := expr.Parts[0].(*model.SplatVariable); ok { @@ -435,9 +483,14 @@ func (g *generator) genScopeTraversalExpression(w io.Writer, expr *model.ScopeTr genIDCall := false - if resource, ok := expr.Parts[0].(*hcl2.Resource); ok { + isInput := false + if schemaType, ok := pcl.GetSchemaForType(destType); ok { + _, isInput = schemaType.(*schema.InputType) + } + + if resource, ok := expr.Parts[0].(*pcl.Resource); ok { isInput = false - if _, ok := hcl2.GetSchemaForType(resource.InputType); ok { + if _, ok := pcl.GetSchemaForType(resource.InputType); ok { // convert .id into .ID() last := expr.Traversal[len(expr.Traversal)-1] if attr, ok := last.(hcl.TraverseAttr); ok && attr.Name == "id" { @@ -451,22 +504,26 @@ func (g *generator) genScopeTraversalExpression(w io.Writer, expr *model.ScopeTr if isInput { argType := g.argumentTypeName(expr, expr.Type(), isInput) if strings.HasSuffix(argType, "Array") { - // use a helper to transform prompt arrays into inputty arrays - var helper *promptToInputArrayHelper - if h, ok := g.arrayHelpers[argType]; ok { - helper = h - } else { - // helpers are emitted at the end in the postamble step - helper = 
&promptToInputArrayHelper{ - destType: argType, + destTypeName := g.argumentTypeName(expr, destType, isInput) + if argType != destTypeName { + // use a helper to transform prompt arrays into inputty arrays + var helper *promptToInputArrayHelper + if h, ok := g.arrayHelpers[argType]; ok { + helper = h + } else { + // helpers are emitted at the end in the postamble step + helper = &promptToInputArrayHelper{ + destType: argType, + } + g.arrayHelpers[argType] = helper } - g.arrayHelpers[argType] = helper + g.Fgenf(w, "%s(", helper.getFnName()) + defer g.Fgenf(w, ")") } - g.Fgenf(w, "%s(", helper.getFnName()) } else { g.Fgenf(w, "%s(", g.argumentTypeName(expr, expr.Type(), isInput)) + defer g.Fgenf(w, ")") } - } // TODO: this isn't exhaustively correct as "range" could be a legit var name @@ -488,10 +545,6 @@ func (g *generator) genScopeTraversalExpression(w io.Writer, expr *model.ScopeTr g.genRelativeTraversal(w, expr.Traversal.SimpleSplit().Rel, expr.Parts[1:], isRootResource) } - if isInput { - g.Fgenf(w, ")") - } - if genIDCall { g.Fgenf(w, ".ID()") } @@ -504,12 +557,23 @@ func (g *generator) GenSplatExpression(w io.Writer, expr *model.SplatExpression) // GenTemplateExpression generates code for a TemplateExpression. 
func (g *generator) GenTemplateExpression(w io.Writer, expr *model.TemplateExpression) { + g.genTemplateExpression(w, expr, expr.Type()) +} + +func (g *generator) genTemplateExpression(w io.Writer, expr *model.TemplateExpression, destType model.Type) { if len(expr.Parts) == 1 { - if lit, ok := expr.Parts[0].(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { - g.GenLiteralValueExpression(w, lit) + if lit, ok := expr.Parts[0].(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { + g.genLiteralValueExpression(w, lit, destType) return } } else { + argTypeName := g.argumentTypeName(expr, destType, false) + isPulumiType := strings.HasPrefix(argTypeName, "pulumi.") + if isPulumiType { + g.Fgenf(w, "%s(", argTypeName) + defer g.Fgenf(w, ")") + } + fmtMaker := make([]string, len(expr.Parts)+1) fmtStr := strings.Join(fmtMaker, "%v") g.Fgenf(w, "fmt.Sprintf(\"%s\"", fmtStr) @@ -530,11 +594,11 @@ func (g *generator) GenTupleConsExpression(w io.Writer, expr *model.TupleConsExp // GenTupleConsExpression generates code for a TupleConsExpression. func (g *generator) genTupleConsExpression(w io.Writer, expr *model.TupleConsExpression, destType model.Type) { - isInput := isInputty(destType) || containsInputs(expr) + isInput := isInputty(destType) var temps []interface{} for i, item := range expr.Expressions { - item, itemTemps := g.lowerExpression(item, item.Type(), isInput) + item, itemTemps := g.lowerExpression(item, item.Type()) temps = append(temps, itemTemps...) expr.Expressions[i] = item } @@ -564,64 +628,28 @@ func (g *generator) GenUnaryOpExpression(w io.Writer, expr *model.UnaryOpExpress g.Fgenf(w, "%[2]v%.[1]*[3]v", precedence, opstr, expr.Operand) } +var typeNameID = 0 + // argumentTypeName computes the go type for the given expression and model type. 
-func (g *generator) argumentTypeName(expr model.Expression, destType model.Type, isInput bool) string { - var tokenRange hcl.Range - if expr != nil { - node := expr.SyntaxNode() - if node != nil && !reflect.ValueOf(node).IsNil() { - tokenRange = expr.SyntaxNode().Range() - } - } - if schemaType, ok := hcl2.GetSchemaForType(destType.(model.Type)); ok { - switch schemaType := schemaType.(type) { - case *schema.ArrayType: - token := schemaType.ElementType.(*schema.ObjectType).Token - pkg, module, member, diags := hcl2.DecomposeToken(token, tokenRange) - // namespaceless invokes - if module == "" || strings.HasPrefix(module, "/") || strings.HasPrefix(module, "index/") { - module = pkg - } - importPrefix := g.getModOrAlias(pkg, module) - importPrefix = strings.Split(importPrefix, "/")[0] - contract.Assert(len(diags) == 0) - fmtString := "[]%s.%s" - if isInput { - member = Title(member) - if strings.HasPrefix(member, "Get") { - if g.useLookupInvokeForm(token) { - member = strings.Replace(member, "Get", "Lookup", 1) - } - return fmt.Sprintf("[]%s.%s", importPrefix, member) - } - fmtString = "%s.%sArray" - } - return fmt.Sprintf(fmtString, importPrefix, member) - case *schema.ObjectType: - token := schemaType.Token - pkg, module, member, diags := hcl2.DecomposeToken(token, tokenRange) - // namespaceless invokes - if module == "" || strings.HasPrefix(module, "/") || strings.HasPrefix(module, "index/") { - module = pkg - } - importPrefix := g.getModOrAlias(pkg, module) - importPrefix = strings.Split(importPrefix, "/")[0] - contract.Assert(len(diags) == 0) - member = Title(member) - if strings.HasPrefix(member, "Get") { - if g.useLookupInvokeForm(token) { - member = strings.Replace(member, "Get", "Lookup", 1) - } - return fmt.Sprintf("%s.%s", importPrefix, member) - } - fmtString := "%s.%s" - if isInput { - fmtString = "%s.%sArgs" - } - return fmt.Sprintf(fmtString, importPrefix, member) - default: - contract.Failf("unexpected schema type %T", schemaType) - } +func (g 
*generator) argumentTypeName(expr model.Expression, destType model.Type, isInput bool) (result string) { + // defer func(id int, t model.Type) { + // schemaType, _ := pcl.GetSchemaForType(destType) + // log.Printf("%v: argumentTypeName(%v, %v, %v) = %v", id, t, isInput, schemaType, result) + // }(typeNameID, destType) + typeNameID++ + + if cns, ok := destType.(*model.ConstType); ok { + destType = cns.Type + } + + // This can happen with null literals. + if destType == model.NoneType { + return "" + } + + if schemaType, ok := pcl.GetSchemaForType(destType); ok { + pkg := &pkgContext{pkg: &schema.Package{Name: "main"}} + return pkg.argsType(schemaType) } switch destType := destType.(type) { @@ -694,9 +722,13 @@ func (g *generator) argumentTypeName(expr model.Expression, destType model.Type, for i, t := range destType.ElementTypes { if i == 0 { elmType = t + if cns, ok := elmType.(*model.ConstType); ok { + elmType = cns.Type + } + continue } - if !elmType.Equals(t) { + if !elmType.AssignableFrom(t) { elmType = nil break } @@ -719,11 +751,13 @@ func (g *generator) argumentTypeName(expr model.Expression, destType model.Type, return g.argumentTypeName(expr, destType.ElementType, isInput) case *model.UnionType: for _, ut := range destType.ElementTypes { - if _, isOpaqueType := ut.(*model.OpaqueType); isOpaqueType { + switch ut := ut.(type) { + case *model.OpaqueType: + return g.argumentTypeName(expr, ut, isInput) + case *model.ConstType: + return g.argumentTypeName(expr, ut.Type, isInput) + case *model.TupleType: return g.argumentTypeName(expr, ut, isInput) - } - if ct, isConstType := ut.(*model.ConstType); isConstType { - return g.argumentTypeName(expr, ct.Type, isInput) } } return "interface{}" @@ -782,20 +816,17 @@ func (nameInfo) Format(name string) string { } // lowerExpression amends the expression with intrinsics for Go generation. 
-func (g *generator) lowerExpression(expr model.Expression, typ model.Type, isInput bool) ( +func (g *generator) lowerExpression(expr model.Expression, typ model.Type) ( model.Expression, []interface{}) { - expr = hcl2.RewritePropertyReferences(expr) - expr, diags := hcl2.RewriteApplies(expr, nameInfo(0), false /*TODO*/) - expr = hcl2.RewriteConversions(expr, typ) + expr = pcl.RewritePropertyReferences(expr) + expr, diags := pcl.RewriteApplies(expr, nameInfo(0), false /*TODO*/) + expr = pcl.RewriteConversions(expr, typ) expr, tTemps, ternDiags := g.rewriteTernaries(expr, g.ternaryTempSpiller) - expr, jTemps, jsonDiags := g.rewriteToJSON(expr, g.jsonTempSpiller) + expr, jTemps, jsonDiags := g.rewriteToJSON(expr) expr, rTemps, readDirDiags := g.rewriteReadDir(expr, g.readDirTempSpiller) expr, sTemps, splatDiags := g.rewriteSplat(expr, g.splatSpiller) expr, oTemps, optDiags := g.rewriteOptionals(expr, g.optionalSpiller) - if isInput { - expr = rewriteInputs(expr) - } var temps []interface{} for _, t := range tTemps { temps = append(temps, t) @@ -833,14 +864,19 @@ func (g *generator) genNYI(w io.Writer, reason string, vs ...interface{}) { func (g *generator) genApply(w io.Writer, expr *model.FunctionCallExpression) { // Extract the list of outputs and the continuation expression from the `__apply` arguments. - applyArgs, then := hcl2.ParseApplyCall(expr) - then = stripInputs(then).(*model.AnonymousFunctionExpression) + applyArgs, then := pcl.ParseApplyCall(expr) isInput := false retType := g.argumentTypeName(nil, then.Signature.ReturnType, isInput) // TODO account for outputs in other namespaces like aws - typeAssertion := fmt.Sprintf(".(%sOutput)", retType) - if !strings.HasPrefix(retType, "pulumi.") { - typeAssertion = fmt.Sprintf(".(pulumi.%sOutput)", Title(retType)) + // TODO[pulumi/pulumi#8453] incomplete pattern code below. 
+ var typeAssertion string + if retType == "[]string" { + typeAssertion = ".(pulumi.StringArrayOutput)" + } else { + typeAssertion = fmt.Sprintf(".(%sOutput)", retType) + if !strings.HasPrefix(retType, "pulumi.") { + typeAssertion = fmt.Sprintf(".(pulumi.%sOutput)", Title(retType)) + } } if len(applyArgs) == 1 { @@ -854,7 +890,7 @@ func (g *generator) genApply(w io.Writer, expr *model.FunctionCallExpression) { } allApplyThen, typeConvDecls := g.rewriteThenForAllApply(then) g.Fgenf(w, ").ApplyT(") - g.genAnonymousFunctionExpression(w, allApplyThen, typeConvDecls) + g.genAnonymousFunctionExpression(w, allApplyThen, typeConvDecls, true) g.Fgenf(w, ")%s", typeAssertion) } } @@ -936,7 +972,7 @@ func (g *generator) literalKey(x model.Expression) (string, bool) { strKey := "" switch x := x.(type) { case *model.LiteralValueExpression: - if x.Type() == model.StringType { + if model.StringType.AssignableFrom(x.Type()) { strKey = x.Value.AsString() break } @@ -945,7 +981,7 @@ func (g *generator) literalKey(x model.Expression) (string, bool) { return buf.String(), true case *model.TemplateExpression: if len(x.Parts) == 1 { - if lit, ok := x.Parts[0].(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := x.Parts[0].(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { strKey = lit.Value.AsString() break } @@ -966,7 +1002,7 @@ func (g *generator) functionName(tokenArg model.Expression) (string, string, str tokenRange := tokenArg.SyntaxNode().Range() // Compute the resource type from the Pulumi type token. 
- pkg, module, member, diagnostics := hcl2.DecomposeToken(token, tokenRange) + pkg, module, member, diagnostics := pcl.DecomposeToken(token, tokenRange) if strings.HasPrefix(member, "get") { if g.useLookupInvokeForm(token) { member = strings.Replace(member, "get", "lookup", 1) @@ -978,9 +1014,15 @@ func (g *generator) functionName(tokenArg model.Expression) (string, string, str } var functionPackages = map[string][]string{ - "toJSON": {"encoding/json"}, - "readDir": {"io/ioutil"}, - "mimeType": {"mime", "path"}, + "join": {"strings"}, + "mimeType": {"mime", "path"}, + "readDir": {"io/ioutil"}, + "readFile": {"io/ioutil"}, + "filebase64": {"io/ioutil", "encoding/base64"}, + "toBase64": {"encoding/base64"}, + "toJSON": {"encoding/json"}, + "sha1": {"fmt", "crypto/sha1"}, + "filebase64sha256": {"fmt", "io/ioutil", "crypto/sha256"}, } func (g *generator) genFunctionPackages(x *model.FunctionCallExpression) []string { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_inputs.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_inputs.go deleted file mode 100644 index 6036d29..0000000 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_inputs.go +++ /dev/null @@ -1,116 +0,0 @@ -package gen - -import ( - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" -) - -// rewriteInputs wraps expressions in an __input intrinsic -// used for generation of pulumi values for go such as pulumi.String("foo") -func rewriteInputs(x model.Expression) model.Expression { - return modifyInputs(x, applyInput) -} - -// stripInputs removes any __input intrinsics -func stripInputs(x model.Expression) model.Expression { - return modifyInputs(x, stripInput) -} - -func stripInput(expr model.Expression) model.Expression { - switch expr := expr.(type) { - case *model.FunctionCallExpression: - switch expr.Name { - case hcl2.IntrinsicInput: - return expr.Args[0] - } - } - return expr -} - -func 
applyInput(expr model.Expression) model.Expression { - return &model.FunctionCallExpression{ - Name: hcl2.IntrinsicInput, - Signature: model.StaticFunctionSignature{ - Parameters: []model.Parameter{ - { - Name: "type", - Type: expr.Type(), - }, - }, - ReturnType: expr.Type(), - }, - Args: []model.Expression{expr}, - } -} - -func modifyInputs( - x model.Expression, - modf func(model.Expression) model.Expression, -) model.Expression { - switch expr := x.(type) { - case *model.AnonymousFunctionExpression: - switch expr.Signature.ReturnType.(type) { - case *model.ConstType, *model.OpaqueType: - x = modf(x) - } - case *model.FunctionCallExpression: - if expr.Name == hcl2.IntrinsicInput { - return x - } - switch expr.Name { - case "mimeType": - return modf(x) - case hcl2.IntrinsicConvert: - switch rt := expr.Signature.ReturnType.(type) { - case *model.UnionType: - for _, t := range rt.ElementTypes { - switch t.(type) { - case *model.ConstType, *model.OpaqueType: - return modf(x) - } - } - } - } - case *model.TemplateExpression: - return modf(x) - case *model.LiteralValueExpression: - t := expr.Type() - switch t.(type) { - case *model.ConstType, *model.OpaqueType: - x = modf(x) - } - case *model.ObjectConsExpression: - for _, item := range expr.Items { - item.Value = modifyInputs(item.Value, modf) - } - x = modf(x) - case *model.TupleConsExpression: - for i, item := range expr.Expressions { - expr.Expressions[i] = modifyInputs(item, modf) - } - case *model.ScopeTraversalExpression: - x = modf(x) - } - - return x -} - -func containsInputs(x model.Expression) bool { - isInput := false - switch expr := x.(type) { - case *model.FunctionCallExpression: - switch expr.Name { - case hcl2.IntrinsicInput: - return true - } - case *model.TupleConsExpression: - for _, e := range expr.Expressions { - isInput = isInput || containsInputs(e) - } - case *model.ObjectConsExpression: - for _, item := range expr.Items { - isInput = isInput || containsInputs(item.Value) - } - } - return 
isInput -} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_json.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_json.go index 205cec3..c376758 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_json.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_json.go @@ -56,13 +56,11 @@ func (js *jsonSpiller) spillExpression(x model.Expression) (model.Expression, hc }, nil } -func (g *generator) rewriteToJSON( - x model.Expression, - spiller *jsonSpiller, -) (model.Expression, []*jsonTemp, hcl.Diagnostics) { - spiller.temps = nil - x, diags := model.VisitExpression(x, spiller.spillExpression, nil) - - return x, spiller.temps, diags - +func (g *generator) rewriteToJSON(x model.Expression) (model.Expression, []*spillTemp, hcl.Diagnostics) { + return g.rewriteSpills(x, func(x model.Expression) (string, model.Expression, bool) { + if call, ok := x.(*model.FunctionCallExpression); ok && call.Name == "toJSON" { + return "json", x, true + } + return "", nil, false + }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_optionals.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_optionals.go index b8bb0e4..0703361 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_optionals.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_optionals.go @@ -1,13 +1,12 @@ package gen import ( - "fmt" - "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" + "github.com/pulumi/pulumi/pkg/v3/codegen" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" ) @@ -29,74 +28,69 @@ func (ot *optionalTemp) SyntaxNode() hclsyntax.Node { } type optionalSpiller struct { - temps []*optionalTemp - count int + invocation 
*model.FunctionCallExpression + intrinsicConvertTo *model.Type } -func (os *optionalSpiller) spillExpressionHelper( - x model.Expression, - destType model.Type, - isInvoke bool, -) (model.Expression, hcl.Diagnostics) { - var temp *optionalTemp +func (os *optionalSpiller) preVisitor(x model.Expression) (model.Expression, hcl.Diagnostics) { switch x := x.(type) { case *model.FunctionCallExpression: if x.Name == "invoke" { // recurse into invoke args - isInvoke = true - _, diags := os.spillExpressionHelper(x.Args[1], x.Args[1].Type(), isInvoke) - return x, diags + isOutputInvoke, _, _ := pcl.RecognizeOutputVersionedInvoke(x) + // ignore output-versioned invokes as they do not need converting + if !isOutputInvoke { + os.invocation = x + } + os.intrinsicConvertTo = nil + return x, nil } - if x.Name == hcl2.IntrinsicConvert { - // propagate convert type - _, diags := os.spillExpressionHelper(x.Args[0], x.Signature.ReturnType, isInvoke) - return x, diags + if x.Name == pcl.IntrinsicConvert { + if os.invocation != nil { + os.intrinsicConvertTo = &x.Signature.ReturnType + } + return x, nil } case *model.ObjectConsExpression: - // only rewrite invoke args (required to be prompt values in Go) - // pulumi.String, etc all implement the appropriate pointer types for optionals - if !isInvoke { + if os.invocation == nil { return x, nil } - if schemaType, ok := hcl2.GetSchemaForType(destType); ok { + destType := x.Type() + if os.intrinsicConvertTo != nil { + destType = *os.intrinsicConvertTo + } + if schemaType, ok := pcl.GetSchemaForType(destType); ok { if schemaType, ok := schemaType.(*schema.ObjectType); ok { - var optionalPrimitives []string + // map of item name to optional type wrapper fn + optionalPrimitives := make(map[string]schema.Type) for _, v := range schemaType.Properties { - isPrimitive := false - primitives := []schema.Type{ - schema.NumberType, - schema.BoolType, - schema.IntType, - schema.StringType, - } - for _, p := range primitives { - if p == v.Type { - 
isPrimitive = true - break + if !v.IsRequired() { + ty := codegen.UnwrapType(v.Type) + switch ty { + case schema.NumberType, schema.BoolType, schema.IntType, schema.StringType: + optionalPrimitives[v.Name] = ty } } - if isPrimitive && !v.IsRequired { - optionalPrimitives = append(optionalPrimitives, v.Name) - } } for i, item := range x.Items { // keys for schematized objects should be simple strings if key, ok := item.Key.(*model.LiteralValueExpression); ok { - if key.Type() == model.StringType { + if model.StringType.AssignableFrom(key.Type()) { strKey := key.Value.AsString() - for _, op := range optionalPrimitives { - if strKey == op { - temp = &optionalTemp{ - Name: fmt.Sprintf("opt%d", os.count), - Value: item.Value, - } - os.temps = append(os.temps, temp) - os.count++ - x.Items[i].Value = &model.ScopeTraversalExpression{ - RootName: fmt.Sprintf("&%s", temp.Name), - Traversal: hcl.Traversal{hcl.TraverseRoot{Name: ""}}, - Parts: []model.Traversable{temp}, - } + if schemaType, isOptional := optionalPrimitives[strKey]; isOptional { + functionName := os.getOptionalConversion(schemaType) + expectedModelType := os.getExpectedModelType(schemaType) + + x.Items[i].Value = &model.FunctionCallExpression{ + Name: functionName, + Signature: model.StaticFunctionSignature{ + Parameters: []model.Parameter{{ + Name: "val", + Type: expectedModelType, + }}, + ReturnType: model.NewOptionalType(expectedModelType), + }, + Args: []model.Expression{item.Value}, } } } @@ -104,22 +98,67 @@ func (os *optionalSpiller) spillExpressionHelper( } } } + // Clear before visiting children, require another __convert call to set again + os.intrinsicConvertTo = nil + return x, nil + default: + // Ditto + os.intrinsicConvertTo = nil + return x, nil + } + return x, nil +} + +func (os *optionalSpiller) postVisitor(x model.Expression) (model.Expression, hcl.Diagnostics) { + switch x := x.(type) { + case *model.FunctionCallExpression: + if x.Name == "invoke" { + if x == os.invocation { + // Clear 
invocation flag once we're done traversing children. + os.invocation = nil + } + } } return x, nil } -func (os *optionalSpiller) spillExpression(x model.Expression) (model.Expression, hcl.Diagnostics) { - isInvoke := false - return os.spillExpressionHelper(x, x.Type(), isInvoke) +func (*optionalSpiller) getOptionalConversion(ty schema.Type) string { + switch ty { + case schema.NumberType: + return "goOptionalFloat64" + case schema.BoolType: + return "goOptionalBool" + case schema.IntType: + return "goOptionalInt" + case schema.StringType: + return "goOptionalString" + default: + return "" + } +} + +func (*optionalSpiller) getExpectedModelType(ty schema.Type) model.Type { + switch ty { + case schema.NumberType: + return model.NumberType + case schema.BoolType: + return model.BoolType + case schema.IntType: + return model.IntType + case schema.StringType: + return model.StringType + default: + return nil + } } func (g *generator) rewriteOptionals( x model.Expression, spiller *optionalSpiller, ) (model.Expression, []*optionalTemp, hcl.Diagnostics) { - spiller.temps = nil - x, diags := model.VisitExpression(x, spiller.spillExpression, nil) - - return x, spiller.temps, diags + // We want to recurse but we only want to use the previsitor, if post visitor is nil we don't + // recurse. 
+ x, diags := model.VisitExpression(x, spiller.preVisitor, spiller.postVisitor) + return x, nil, diags } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_utils.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_utils.go index d794a5c..b7491ef 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_utils.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_program_utils.go @@ -54,3 +54,44 @@ func (p *promptToInputArrayHelper) getPromptItemType() string { func (p *promptToInputArrayHelper) getInputItemType() string { return strings.TrimSuffix(p.destType, "Array") } + +// Provides code for a method which will be placed in the program preamble if deemed +// necessary. Because many tasks in Go such as reading a file require extensive error +// handling, it is much prettier to encapsulate that error handling boilerplate as its +// own function in the preamble. +func getHelperMethodIfNeeded(functionName string) (string, bool) { + switch functionName { + case "readFile": + return `func readFileOrPanic(path string) pulumi.StringPtrInput { + data, err := ioutil.ReadFile(path) + if err != nil { + panic(err.Error()) + } + return pulumi.String(string(data)) + }`, true + case "filebase64": + return `func filebase64OrPanic(path string) pulumi.StringPtrInput { + if fileData, err := ioutil.ReadFile(path); err == nil { + return pulumi.String(base64.StdEncoding.EncodeToString(fileData[:])) + } else { + panic(err.Error()) + } + }`, true + case "filebase64sha256": + return `func filebase64sha256OrPanic(path string) pulumi.StringPtrInput { + if fileData, err := ioutil.ReadFile(path); err == nil { + hashedData := sha256.Sum256([]byte(fileData)) + return pulumi.String(base64.StdEncoding.EncodeToString(hashedData[:])) + } else { + panic(err.Error()) + } + }`, true + case "sha1": + return `func sha1Hash(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) + }`, true + default: + 
return "", false + } +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_spill.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_spill.go new file mode 100644 index 0000000..010de0a --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/gen_spill.go @@ -0,0 +1,74 @@ +package gen + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" +) + +type spillFunc func(x model.Expression) (string, model.Expression, bool) + +type spillTemp struct { + Kind string + Variable *model.Variable + Value model.Expression +} + +type spills struct { + counts map[string]int +} + +func (s *spills) newTemp(kind string, value model.Expression) *spillTemp { + i := s.counts[kind] + s.counts[kind] = i + 1 + + v := &model.Variable{ + Name: fmt.Sprintf("%s%d", kind, i), + VariableType: value.Type(), + } + return &spillTemp{ + Variable: v, + Value: value, + } +} + +type spiller struct { + spills *spills + + temps []*spillTemp + spill spillFunc + disabled bool +} + +func (s *spiller) preVisit(x model.Expression) (model.Expression, hcl.Diagnostics) { + _, isfn := x.(*model.AnonymousFunctionExpression) + if isfn { + s.disabled = true + } + return x, nil +} + +func (s *spiller) postVisit(x model.Expression) (model.Expression, hcl.Diagnostics) { + _, isfn := x.(*model.AnonymousFunctionExpression) + if isfn { + s.disabled = false + } else if !s.disabled { + if kind, value, ok := s.spill(x); ok { + t := s.spills.newTemp(kind, value) + s.temps = append(s.temps, t) + return model.VariableReference(t.Variable), nil + } + } + return x, nil +} + +func (g *generator) rewriteSpills( + x model.Expression, spill spillFunc) (model.Expression, []*spillTemp, hcl.Diagnostics) { + spiller := &spiller{ + spills: g.spills, + spill: spill, + } + x, diags := model.VisitExpression(x, spiller.preVisit, spiller.postVisit) + return x, spiller.temps, diags +} diff --git 
a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/importer.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/importer.go index 7bd89fd..7ee337b 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/importer.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/importer.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -45,6 +45,32 @@ type GoPackageInfo struct { // Generate container types (arrays, maps, pointer output types etc.) for each resource. // These are typically used to support external references. GenerateResourceContainerTypes bool `json:"generateResourceContainerTypes,omitempty"` + + // The version of the Pulumi SDK used with this provider, e.g. 3. + // Used to generate doc links for pulumi builtin types. If omitted, the latest SDK version is used. + PulumiSDKVersion int `json:"pulumiSDKVersion,omitempty"` + + // Feature flag to disable generating `$fnOutput` invoke + // function versions to save space. + DisableFunctionOutputVersions bool `json:"disableFunctionOutputVersions,omitempty"` + + // Determines whether to make single-return-value methods return an output struct or the value. + LiftSingleValueMethodReturns bool `json:"liftSingleValueMethodReturns,omitempty"` + + // Feature flag to disable generating input type registration. This is a + // space saving measure. + DisableInputTypeRegistrations bool `json:"disableInputTypeRegistrations,omitempty"` + + // Feature flag to disable generating Pulumi object default functions. This is a + // space saving measure. + DisableObjectDefaults bool `json:"disableObjectDefaults,omitempty"` + + // GenerateExtraInputTypes determines whether or not the code generator generates input (and output) types for + // all plain types, instead of for only types that are used as input/output types. 
+ GenerateExtraInputTypes bool `json:"generateExtraInputTypes,omitempty"` + + // Respect the Pkg.Version field for emitted code. + RespectSchemaVersion bool `json:"respectSchemaVersion,omitempty"` } // Importer implements schema.Language for Go. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/utilities.go index 34751cc..1c64461 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/go/utilities.go @@ -20,7 +20,6 @@ import ( "strings" "unicode" - "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v3/codegen" ) @@ -84,7 +83,7 @@ func makeSafeEnumName(name, typeName string) (string, error) { // If the name is one illegal character, return an error. if len(safeName) == 1 && !isLegalIdentifierStart(rune(safeName[0])) { - return "", errors.Errorf("enum name %s is not a valid identifier", safeName) + return "", fmt.Errorf("enum name %s is not a valid identifier", safeName) } // Capitalize and make a valid identifier. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/invoke.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/invoke.go deleted file mode 100644 index 273b2cf..0000000 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/invoke.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016-2020, Pulumi Corporation. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package hcl2 - -import ( - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" - "github.com/zclconf/go-cty/cty" -) - -const Invoke = "invoke" - -func getInvokeToken(call *hclsyntax.FunctionCallExpr) (string, hcl.Range, bool) { - if call.Name != Invoke || len(call.Args) < 1 { - return "", hcl.Range{}, false - } - template, ok := call.Args[0].(*hclsyntax.TemplateExpr) - if !ok || len(template.Parts) != 1 { - return "", hcl.Range{}, false - } - literal, ok := template.Parts[0].(*hclsyntax.LiteralValueExpr) - if !ok { - return "", hcl.Range{}, false - } - if literal.Val.Type() != cty.String { - return "", hcl.Range{}, false - } - return literal.Val.AsString(), call.Args[0].Range(), true -} - -func (b *binder) bindInvokeSignature(args []model.Expression) (model.StaticFunctionSignature, hcl.Diagnostics) { - signature := model.StaticFunctionSignature{ - Parameters: []model.Parameter{ - { - Name: "token", - Type: model.StringType, - }, - { - Name: "args", - Type: model.NewOptionalType(model.DynamicType), - }, - { - Name: "provider", - Type: model.NewOptionalType(model.StringType), - }, - }, - ReturnType: model.DynamicType, - } - - if len(args) < 1 { - return signature, nil - } - - template, ok := args[0].(*model.TemplateExpression) - if !ok || len(template.Parts) != 1 { - return signature, hcl.Diagnostics{tokenMustBeStringLiteral(args[0])} - } - lit, ok := template.Parts[0].(*model.LiteralValueExpression) - if !ok || lit.Type() != model.StringType { - return signature, hcl.Diagnostics{tokenMustBeStringLiteral(args[0])} - } - - token, tokenRange := lit.Value.AsString(), args[0].SyntaxNode().Range() - pkg, _, _, diagnostics := DecomposeToken(token, tokenRange) - if diagnostics.HasErrors() { - return signature, diagnostics - } - - pkgSchema, ok := b.options.packageCache.entries[pkg] - if !ok { - return signature, hcl.Diagnostics{unknownPackage(pkg, tokenRange)} - } - - fn, ok := 
pkgSchema.functions[token] - if !ok { - canon := canonicalizeToken(token, pkgSchema.schema) - if fn, ok = pkgSchema.functions[canon]; ok { - token, lit.Value = canon, cty.StringVal(canon) - } - } - if !ok { - return signature, hcl.Diagnostics{unknownFunction(token, tokenRange)} - } - - // Create args and result types for the schema. - if fn.Inputs == nil { - signature.Parameters[1].Type = model.NewOptionalType(model.NewObjectType(map[string]model.Type{})) - } else { - signature.Parameters[1].Type = b.schemaTypeToType(fn.Inputs) - } - - if fn.Outputs == nil { - signature.ReturnType = model.NewObjectType(map[string]model.Type{}) - } else { - signature.ReturnType = b.schemaTypeToType(fn.Outputs) - } - signature.ReturnType = model.NewPromiseType(signature.ReturnType) - - return signature, nil -} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/diagnostics.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/diagnostics.go index 4e10bec..bfb7fa8 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/diagnostics.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/diagnostics.go @@ -35,10 +35,29 @@ func diagf(severity hcl.DiagnosticSeverity, subject hcl.Range, f string, args .. 
} func ExprNotConvertible(destType Type, expr Expression) *hcl.Diagnostic { - return errorf(expr.SyntaxNode().Range(), "cannot assign expression of type %v to location of type %v", expr.Type(), + _, whyF := destType.conversionFrom(expr.Type(), false, map[Type]struct{}{}) + why := whyF() + if len(why) != 0 { + return errorf(expr.SyntaxNode().Range(), why[0].Summary) + } + return errorf(expr.SyntaxNode().Range(), "cannot assign expression of type %v to location of type %v: ", expr.Type(), destType) } +func typeNotConvertible(dest, src Type) *hcl.Diagnostic { + return &hcl.Diagnostic{Severity: hcl.DiagError, Summary: fmt.Sprintf("cannot assign value of type %v to type %v", + src, dest)} +} + +func tuplesHaveDifferentLengths(dest, src *TupleType) *hcl.Diagnostic { + return &hcl.Diagnostic{Severity: hcl.DiagError, Summary: fmt.Sprintf("tuples %v and %v have different lengths", + dest, src)} +} + +func invalidRecursiveType(t Type) *hcl.Diagnostic { + return errorf(t.SyntaxNode().Range(), "invalid recursive type") +} + func objectKeysMustBeStrings(expr Expression) *hcl.Diagnostic { return errorf(expr.SyntaxNode().Range(), "object keys must be strings: cannot assign expression of type %v to location of type string", expr.Type()) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/expression.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/expression.go index aab44b7..4db650c 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/expression.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/expression.go @@ -1034,7 +1034,13 @@ func (x *FunctionCallExpression) Typecheck(typecheckOperands bool) hcl.Diagnosti typecheckDiags := typecheckArgs(rng, x.Signature, x.Args...) diagnostics = append(diagnostics, typecheckDiags...) - x.Signature.ReturnType = liftOperationType(x.Signature.ReturnType, x.Args...) 
+ // Unless the function is already automatically using an + // Output-returning version, modify the signature to account + // for automatic lifting to Promise or Output. + _, isOutput := x.Signature.ReturnType.(*OutputType) + if !isOutput { + x.Signature.ReturnType = liftOperationType(x.Signature.ReturnType, x.Args...) + } return diagnostics } @@ -1311,7 +1317,8 @@ func (x *LiteralValueExpression) NodeTokens() syntax.NodeTokens { // Type returns the type of the literal value expression. func (x *LiteralValueExpression) Type() Type { if x.exprType == nil { - x.exprType = ctyTypeToType(x.Value.Type(), false) + typ := ctyTypeToType(x.Value.Type(), false) + x.exprType = NewConstType(typ, x.Value) } return x.exprType } @@ -1327,6 +1334,7 @@ func (x *LiteralValueExpression) Typecheck(typecheckOperands bool) hcl.Diagnosti switch { case typ == NoneType || typ == StringType || typ == IntType || typ == NumberType || typ == BoolType: // OK + typ = NewConstType(typ, x.Value) default: var rng hcl.Range if x.Syntax != nil { @@ -2211,7 +2219,7 @@ func (x *TemplateExpression) print(w io.Writer, p *printer) { // Print the expressions. for _, part := range x.Parts { - if lit, ok := part.(*LiteralValueExpression); ok && lit.Type() == StringType { + if lit, ok := part.(*LiteralValueExpression); ok && StringType.AssignableFrom(lit.Type()) { lit.printLit(w, p, !isHeredoc) } else { p.fprintf(w, "%v", part) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format/func.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format/func.go index 9b06cc6..698772b 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format/func.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format/func.go @@ -14,7 +14,9 @@ package format -import "fmt" +import ( + "fmt" +) // Func is a function type that implements the fmt.Formatter interface. This can be used to conveniently // implement this interface for types defined in other packages. 
diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type.go index 2efdd3a..feabda8 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type.go @@ -15,9 +15,12 @@ package model import ( + "github.com/hashicorp/hcl/v2" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) +type lazyDiagnostics func() hcl.Diagnostics + type ConversionKind int const ( @@ -41,7 +44,7 @@ type Type interface { String() string equals(other Type, seen map[Type]struct{}) bool - conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind + conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) string(seen map[Type]struct{}) string unify(other Type) (Type, ConversionKind) isType() @@ -62,22 +65,33 @@ var ( DynamicType = MustNewOpaqueType("dynamic") ) -func assignableFrom(dest, src Type, assignableFrom func() bool) bool { - return dest.Equals(src) || dest == DynamicType || assignableFrom() +func assignableFrom(dest, src Type, assignableFromImpl func() bool) bool { + if dest.Equals(src) || dest == DynamicType { + return true + } + if cns, ok := src.(*ConstType); ok { + return assignableFrom(dest, cns.Type, assignableFromImpl) + } + return assignableFromImpl() } func conversionFrom(dest, src Type, unifying bool, seen map[Type]struct{}, - conversionFrom func() ConversionKind) ConversionKind { + conversionFromImpl func() (ConversionKind, lazyDiagnostics)) (ConversionKind, lazyDiagnostics) { + if dest.Equals(src) || dest == DynamicType { - return SafeConversion + return SafeConversion, nil } - if src, isUnion := src.(*UnionType); isUnion { + + switch src := src.(type) { + case *UnionType: return src.conversionTo(dest, unifying, seen) + case *ConstType: + return conversionFrom(dest, src.Type, unifying, seen, conversionFromImpl) } if src == DynamicType { - 
return UnsafeConversion + return UnsafeConversion, nil } - return conversionFrom() + return conversionFromImpl() } func unify(t0, t1 Type, unify func() (Type, ConversionKind)) (Type, ConversionKind) { @@ -95,7 +109,8 @@ func unify(t0, t1 Type, unify func() (Type, ConversionKind)) (Type, ConversionKi // The dynamic type unifies with any other type by selecting that other type. return t0, UnsafeConversion default: - conversionFrom, conversionTo := t0.conversionFrom(t1, true, nil), t1.conversionFrom(t0, true, nil) + conversionFrom, _ := t0.conversionFrom(t1, true, nil) + conversionTo, _ := t1.conversionFrom(t0, true, nil) switch { case conversionFrom < conversionTo: return t1, conversionTo diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_collection.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_collection.go index 7e52657..06d2c8e 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_collection.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_collection.go @@ -14,7 +14,9 @@ package model -import "github.com/hashicorp/hcl/v2" +import ( + "github.com/hashicorp/hcl/v2" +) // unwrapIterableSourceType removes any eventual types that wrap a type intended for iteration. func unwrapIterableSourceType(t Type) Type { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_const.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_const.go index 6f23266..a23b619 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_const.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_const.go @@ -15,11 +15,10 @@ package model import ( - "fmt" - "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" + "github.com/zclconf/go-cty/cty" ) // ConstType represents a type that is a single constant value. 
@@ -27,11 +26,11 @@ type ConstType struct { // Type is the underlying value type. Type Type // Value is the constant value. - Value interface{} + Value cty.Value } // NewConstType creates a new constant type with the given type and value. -func NewConstType(typ Type, value interface{}) *ConstType { +func NewConstType(typ Type, value cty.Value) *ConstType { return &ConstType{Type: typ, Value: value} } @@ -57,7 +56,7 @@ func (t *ConstType) equals(other Type, seen map[Type]struct{}) bool { } otherConst, ok := other.(*ConstType) - return ok && t.Value == otherConst.Value && t.Type.equals(otherConst.Type, seen) + return ok && t.Value.RawEquals(otherConst.Value) && t.Type.equals(otherConst.Type, seen) } // AssignableFrom returns true if this type is assignable from the indicated source type. A const(value) is assignable @@ -71,17 +70,21 @@ func (t *ConstType) AssignableFrom(src Type) bool { // ConversionFrom returns the kind of conversion (if any) that is possible from the source type to this type. // The const type is only convertible from itself. 
func (t *ConstType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *ConstType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { - return NoConversion +func (t *ConstType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { + if t.Type.ConversionFrom(src) != NoConversion { + return UnsafeConversion, nil + } + return NoConversion, nil }) } func (t *ConstType) String() string { - return fmt.Sprintf("%v", t.Value) + return t.Value.GoString() } func (t *ConstType) string(_ map[Type]struct{}) string { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_eventuals.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_eventuals.go index 4ef95fe..c78a4ff 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_eventuals.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_eventuals.go @@ -69,7 +69,7 @@ func resolveEventualsImpl(t Type, resolveOutputs bool, seen map[Type]Type) (Type } elementTypes[i] = element } - return NewUnionType(elementTypes...), transform + return NewUnionTypeAnnotated(elementTypes, t.Annotations...), transform case *ObjectType: transform := makeIdentity if already, ok := seen[t]; ok { @@ -267,10 +267,6 @@ func inputTypeImpl(t Type, seen map[Type]Type) Type { return t } - if already, ok := seen[t]; ok { - return already - } - var src Type switch t := t.(type) { case *OutputType: @@ -288,6 +284,10 @@ func inputTypeImpl(t Type, seen map[Type]Type) Type { } src = NewUnionTypeAnnotated(elementTypes, t.Annotations...) 
case *ObjectType: + if already, ok := seen[t]; ok { + return already + } + properties := map[string]Type{} src = NewObjectType(properties, t.Annotations...) seen[t] = src diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_list.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_list.go index fdf420b..fd66115 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_list.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_list.go @@ -88,11 +88,12 @@ func (t *ListType) AssignableFrom(src Type) bool { // to T. If any element type is unsafely convertible to T and no element type is safely convertible to T, the // conversion is unsafe. Otherwise, no conversion exists. func (t *ListType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *ListType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *ListType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { switch src := src.(type) { case *ListType: return t.ElementType.conversionFrom(src.ElementType, unifying, seen) @@ -100,14 +101,18 @@ func (t *ListType) conversionFrom(src Type, unifying bool, seen map[Type]struct{ return t.ElementType.conversionFrom(src.ElementType, unifying, seen) case *TupleType: conversionKind := SafeConversion + var diags lazyDiagnostics for _, src := range src.ElementTypes { - if ck := t.ElementType.conversionFrom(src, unifying, seen); ck < conversionKind { - conversionKind = ck + if ck, why := t.ElementType.conversionFrom(src, unifying, seen); ck < conversionKind { + conversionKind, diags = ck, why + if conversionKind == NoConversion { + break + } } } - return 
conversionKind + return conversionKind, diags } - return NoConversion + return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{typeNotConvertible(t, src)} } }) } @@ -143,7 +148,8 @@ func (t *ListType) unify(other Type) (Type, ConversionKind) { return NewListType(elementType), conversionKind default: // Prefer the list type. - return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_map.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_map.go index 1dec7e4..666814c 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_map.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_map.go @@ -89,24 +89,29 @@ func (t *MapType) AssignableFrom(src Type) bool { // convertible to T. If any element type is unsafely convertible to T and no element type is safely convertible to T, // the conversion is unsafe. Otherwise, no conversion exists. 
func (t *MapType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *MapType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *MapType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { switch src := src.(type) { case *MapType: return t.ElementType.conversionFrom(src.ElementType, unifying, seen) case *ObjectType: conversionKind := SafeConversion + var diags lazyDiagnostics for _, src := range src.Properties { - if ck := t.ElementType.conversionFrom(src, unifying, seen); ck < conversionKind { + if ck, _ := t.ElementType.conversionFrom(src, unifying, seen); ck < conversionKind { conversionKind = ck + if conversionKind == NoConversion { + break + } } } - return conversionKind + return conversionKind, diags } - return NoConversion + return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{typeNotConvertible(t, src)} } }) } @@ -138,7 +143,8 @@ func (t *MapType) unify(other Type) (Type, ConversionKind) { return NewMapType(elementType), conversionKind default: // Prefer the map type. 
- return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_none.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_none.go index 13b9c7d..5299786 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_none.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_none.go @@ -45,12 +45,13 @@ func (noneType) AssignableFrom(src Type) bool { } func (noneType) ConversionFrom(src Type) ConversionKind { - return NoneType.conversionFrom(src, false, nil) + kind, _ := NoneType.conversionFrom(src, false, nil) + return kind } -func (noneType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(NoneType, src, unifying, seen, func() ConversionKind { - return NoConversion +func (noneType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(NoneType, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { + return NoConversion, nil }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_object.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_object.go index aafe1f8..e761bf1 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_object.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_object.go @@ -179,50 +179,60 @@ func (u *objectTypeUnifier) unify(t *ObjectType) { // This conversion is always unsafe, and may fail if the map does not contain an appropriate set of keys for the // destination type. 
func (t *ObjectType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *ObjectType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *ObjectType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { switch src := src.(type) { case *ObjectType: if seen != nil { if _, ok := seen[t]; ok { - return NoConversion + return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{invalidRecursiveType(t)} } } } else { seen = map[Type]struct{}{} } seen[t] = struct{}{} + defer delete(seen, t) if unifying { var unifier objectTypeUnifier unifier.unify(t) unifier.unify(src) - return unifier.conversionKind + return unifier.conversionKind, nil } conversionKind := SafeConversion + var diags lazyDiagnostics for k, dst := range t.Properties { src, ok := src.Properties[k] if !ok { src = NoneType } - if ck := dst.conversionFrom(src, unifying, seen); ck < conversionKind { - conversionKind = ck + if ck, why := dst.conversionFrom(src, unifying, seen); ck < conversionKind { + conversionKind, diags = ck, why + if conversionKind == NoConversion { + break + } } } - return conversionKind + return conversionKind, diags case *MapType: conversionKind := UnsafeConversion + var diags lazyDiagnostics for _, dst := range t.Properties { - if ck := dst.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind { - conversionKind = ck + if ck, why := dst.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind { + conversionKind, diags = ck, why + if conversionKind == NoConversion { + break + } } } - return conversionKind + return conversionKind, diags } - return NoConversion + return NoConversion, func() hcl.Diagnostics { 
return hcl.Diagnostics{typeNotConvertible(t, src)} } }) } @@ -282,7 +292,8 @@ func (t *ObjectType) unify(other Type) (Type, ConversionKind) { return NewObjectType(unifier.properties), unifier.conversionKind default: // Otherwise, prefer the object type. - return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_opaque.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_opaque.go index 4bc36f0..99c5e63 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_opaque.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_opaque.go @@ -19,7 +19,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/pkg/errors" + "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -55,7 +55,7 @@ func MustNewOpaqueType(name string, annotations ...interface{}) *OpaqueType { // NewOpaqueType creates a new opaque type with the given name. 
func NewOpaqueType(name string, annotations ...interface{}) (*OpaqueType, error) { if _, ok := opaqueTypes[name]; ok { - return nil, errors.Errorf("opaque type %s is already defined", name) + return nil, fmt.Errorf("opaque type %s is already defined", name) } t := &OpaqueType{Name: name, Annotations: annotations} @@ -95,51 +95,62 @@ func (t *OpaqueType) AssignableFrom(src Type) bool { }) } -func (t *OpaqueType) conversionFromImpl(src Type, unifying, checkUnsafe bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *OpaqueType) conversionFromImpl( + src Type, unifying, checkUnsafe bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { if constType, ok := src.(*ConstType); ok { - return t.ConversionFrom(constType.Type) + return t.conversionFrom(constType.Type, unifying, seen) } switch { case t == NumberType: // src == NumberType is handled by t == src above contract.Assert(src != NumberType) - cki := IntType.conversionFromImpl(src, unifying, false, seen) - if cki == SafeConversion { - return SafeConversion - } - if cki == UnsafeConversion || checkUnsafe && StringType.conversionFromImpl(src, unifying, false, seen).Exists() { - return UnsafeConversion + cki, _ := IntType.conversionFromImpl(src, unifying, false, seen) + switch cki { + case SafeConversion: + return SafeConversion, nil + case UnsafeConversion: + return UnsafeConversion, nil + default: + if checkUnsafe { + if kind, _ := StringType.conversionFromImpl(src, unifying, false, seen); kind.Exists() { + return UnsafeConversion, nil + } + } } - return NoConversion + return NoConversion, nil case t == IntType: - if checkUnsafe && NumberType.conversionFromImpl(src, unifying, true, seen).Exists() { - return UnsafeConversion + if checkUnsafe { + if kind, _ := NumberType.conversionFromImpl(src, unifying, true, seen); kind.Exists() { + return 
UnsafeConversion, nil + } } - return NoConversion + return NoConversion, nil case t == BoolType: - if checkUnsafe && StringType.conversionFromImpl(src, unifying, false, seen).Exists() { - return UnsafeConversion + if checkUnsafe { + if kind, _ := StringType.conversionFromImpl(src, unifying, false, seen); kind.Exists() { + return UnsafeConversion, nil + } } - return NoConversion + return NoConversion, nil case t == StringType: - ckb := BoolType.conversionFromImpl(src, unifying, false, seen) - ckn := NumberType.conversionFromImpl(src, unifying, false, seen) + ckb, _ := BoolType.conversionFromImpl(src, unifying, false, seen) + ckn, _ := NumberType.conversionFromImpl(src, unifying, false, seen) if ckb == SafeConversion || ckn == SafeConversion { - return SafeConversion + return SafeConversion, nil } if ckb == UnsafeConversion || ckn == UnsafeConversion { - return UnsafeConversion + return UnsafeConversion, nil } - return NoConversion + return NoConversion, nil default: - return NoConversion + return NoConversion, nil } }) } -func (t *OpaqueType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { +func (t *OpaqueType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { return t.conversionFromImpl(src, unifying, true, seen) } @@ -155,7 +166,8 @@ func (t *OpaqueType) conversionFrom(src Type, unifying bool, seen map[Type]struc // - The bool type is unsafely convertible from string // func (t *OpaqueType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } func (t *OpaqueType) String() string { @@ -196,10 +208,12 @@ func (t *OpaqueType) unify(other Type) (Type, ConversionKind) { for _, goal := range opaquePrecedence { if t == goal { - return goal, goal.conversionFrom(other, true, nil) + kind, _ := goal.conversionFrom(other, true, nil) + return goal, kind } if other == goal { - return goal, 
goal.conversionFrom(t, true, nil) + kind, _ := goal.conversionFrom(t, true, nil) + return goal, kind } } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_output.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_output.go index 855da6c..65abd77 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_output.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_output.go @@ -67,7 +67,7 @@ func (t *OutputType) AssignableFrom(src Type) bool { case *OutputType: return t.ElementType.AssignableFrom(src.ElementType) case *PromiseType: - return t.ElementType.AssignableFrom(src.ElementType) + return t.ElementType.AssignableFrom(ResolveOutputs(src.ElementType)) } return t.ElementType.AssignableFrom(src) }) @@ -77,16 +77,17 @@ func (t *OutputType) AssignableFrom(src Type) bool { // output(T) is convertible from a type U, output(U), or promise(U) if U is convertible to T. If the conversion from // U to T is unsafe, the entire conversion is unsafe. Otherwise, the conversion is safe. 
func (t *OutputType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *OutputType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *OutputType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { switch src := src.(type) { case *OutputType: return t.ElementType.conversionFrom(src.ElementType, unifying, seen) case *PromiseType: - return t.ElementType.conversionFrom(src.ElementType, unifying, seen) + return t.ElementType.conversionFrom(ResolveOutputs(src.ElementType), unifying, seen) } return t.ElementType.conversionFrom(src, unifying, seen) }) @@ -109,11 +110,12 @@ func (t *OutputType) unify(other Type) (Type, ConversionKind) { return NewOutputType(elementType), conversionKind case *PromiseType: // If the other type is a promise type, unify based on the element type. - elementType, conversionKind := t.ElementType.unify(other.ElementType) + elementType, conversionKind := t.ElementType.unify(ResolveOutputs(other.ElementType)) return NewOutputType(elementType), conversionKind default: // Prefer the output type. 
- return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_promise.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_promise.go index 8169873..5b558b3 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_promise.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_promise.go @@ -74,11 +74,13 @@ func (t *PromiseType) AssignableFrom(src Type) bool { // promise(T) is convertible from a type U or promise(U) if U is convertible to T. If the conversion from U to T is // unsafe, the entire conversion is unsafe. Otherwise, the conversion is safe. func (t *PromiseType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *PromiseType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *PromiseType) conversionFrom( + src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { if src, ok := src.(*PromiseType); ok { return t.ElementType.conversionFrom(src.ElementType, unifying, seen) } @@ -107,7 +109,8 @@ func (t *PromiseType) unify(other Type) (Type, ConversionKind) { return NewOutputType(elementType), conversionKind default: // Prefer the promise type. 
- return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_set.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_set.go index ff121f5..8b21807 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_set.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_set.go @@ -72,26 +72,29 @@ func (t *SetType) AssignableFrom(src Type) bool { // the entire conversion is unsafe; otherwise the conversion is safe. An unsafe conversion exists from list(U) or // or tuple(U_0 ... U_N) to set(T) if a conversion exists from each U to T. func (t *SetType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *SetType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *SetType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { switch src := src.(type) { case *SetType: return t.ElementType.conversionFrom(src.ElementType, unifying, seen) case *ListType: - if conversionKind := t.ElementType.conversionFrom(src.ElementType, unifying, seen); conversionKind == NoConversion { - return NoConversion + if conversionKind, why := t.ElementType.conversionFrom(src.ElementType, unifying, seen); conversionKind == + NoConversion { + return NoConversion, why } - return UnsafeConversion + return UnsafeConversion, nil case *TupleType: - if conversionKind := NewListType(t.ElementType).conversionFrom(src, unifying, seen); conversionKind == NoConversion { - return NoConversion + if conversionKind, why := NewListType(t.ElementType).conversionFrom(src, unifying, seen); 
conversionKind == + NoConversion { + return NoConversion, why } - return UnsafeConversion + return UnsafeConversion, nil } - return NoConversion + return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{typeNotConvertible(t, src)} } }) } @@ -127,7 +130,8 @@ func (t *SetType) unify(other Type) (Type, ConversionKind) { return NewSetType(elementType), conversionKind default: // Prefer the set type. - return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_tuple.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_tuple.go index f3e39d1..f268ed8 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_tuple.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_tuple.go @@ -148,11 +148,12 @@ func (u *tupleElementUnifier) unify(t *TupleType) { } func (t *TupleType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *TupleType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *TupleType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { switch src := src.(type) { case *TupleType: // When unifying, we will unify two tuples of different length to a new tuple, where elements with matching @@ -161,47 +162,59 @@ func (t *TupleType) conversionFrom(src Type, unifying bool, seen map[Type]struct var unifier tupleElementUnifier unifier.unify(t) unifier.unify(src) - return unifier.conversionKind + return unifier.conversionKind, nil } if len(t.ElementTypes) != len(src.ElementTypes) { - return NoConversion + return NoConversion, 
func() hcl.Diagnostics { return hcl.Diagnostics{tuplesHaveDifferentLengths(t, src)} } } conversionKind := SafeConversion + var diags lazyDiagnostics for i, dst := range t.ElementTypes { - if ck := dst.conversionFrom(src.ElementTypes[i], unifying, seen); ck < conversionKind { - conversionKind = ck + if ck, why := dst.conversionFrom(src.ElementTypes[i], unifying, seen); ck < conversionKind { + conversionKind, diags = ck, why + if conversionKind == NoConversion { + break + } } } // When unifying, the conversion kind of two tuple types is the lesser of the conversion in each direction. if unifying { - conversionTo := src.conversionFrom(t, false, seen) + conversionTo, _ := src.conversionFrom(t, false, seen) if conversionTo < conversionKind { conversionKind = conversionTo } } - return conversionKind + return conversionKind, diags case *ListType: conversionKind := UnsafeConversion + var diags lazyDiagnostics for _, t := range t.ElementTypes { - if ck := t.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind { - conversionKind = ck + if ck, why := t.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind { + conversionKind, diags = ck, why + if conversionKind == NoConversion { + break + } } } - return conversionKind + return conversionKind, diags case *SetType: conversionKind := UnsafeConversion + var diags lazyDiagnostics for _, t := range t.ElementTypes { - if ck := t.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind { - conversionKind = ck + if ck, why := t.conversionFrom(src.ElementType, unifying, seen); ck < conversionKind { + conversionKind, diags = ck, why + if conversionKind == NoConversion { + break + } } } - return conversionKind + return conversionKind, diags } - return NoConversion + return NoConversion, func() hcl.Diagnostics { return hcl.Diagnostics{typeNotConvertible(t, src)} } }) } @@ -254,7 +267,8 @@ func (t *TupleType) unify(other Type) (Type, ConversionKind) { return NewSetType(elementType), conversionKind 
default: // Otherwise, prefer the tuple type. - return t, t.conversionFrom(other, true, nil) + kind, _ := t.conversionFrom(other, true, nil) + return t, kind } }) } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_union.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_union.go index 0cd2cab..4681062 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_union.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/type_union.go @@ -169,28 +169,52 @@ func (t *UnionType) AssignableFrom(src Type) bool { // type is safely convertible, the conversion is safe; if no element is safely convertible but some element is unsafely // convertible, the conversion is unsafe. func (t *UnionType) ConversionFrom(src Type) ConversionKind { - return t.conversionFrom(src, false, nil) + kind, _ := t.conversionFrom(src, false, nil) + return kind } -func (t *UnionType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) ConversionKind { - return conversionFrom(t, src, unifying, seen, func() ConversionKind { +func (t *UnionType) conversionFrom(src Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { + return conversionFrom(t, src, unifying, seen, func() (ConversionKind, lazyDiagnostics) { var conversionKind ConversionKind + var diags []lazyDiagnostics + + // Fast path: see if the source type is equal to any of the element types. Equality checks are generally + // less expensive that full convertibility checks. 
for _, t := range t.ElementTypes { - if ck := t.conversionFrom(src, unifying, seen); ck > conversionKind { + if src.Equals(t) { + return SafeConversion, nil + } + } + + for _, t := range t.ElementTypes { + ck, why := t.conversionFrom(src, unifying, seen) + if ck > conversionKind { conversionKind = ck + } else if why != nil { + diags = append(diags, why) + } + } + if conversionKind == NoConversion { + return NoConversion, func() hcl.Diagnostics { + var all hcl.Diagnostics + for _, why := range diags { + //nolint:errcheck + all.Extend(why()) + } + return all } } - return conversionKind + return conversionKind, nil }) } // If all conversions to a dest type from a union type are safe, the conversion is safe. // If no conversions to a dest type from a union type exist, the conversion does not exist. // Otherwise, the conversion is unsafe. -func (t *UnionType) conversionTo(dest Type, unifying bool, seen map[Type]struct{}) ConversionKind { +func (t *UnionType) conversionTo(dest Type, unifying bool, seen map[Type]struct{}) (ConversionKind, lazyDiagnostics) { conversionKind, exists := SafeConversion, false for _, t := range t.ElementTypes { - switch dest.conversionFrom(t, unifying, seen) { + switch kind, _ := dest.conversionFrom(t, unifying, seen); kind { case SafeConversion: exists = true case UnsafeConversion: @@ -200,9 +224,9 @@ func (t *UnionType) conversionTo(dest Type, unifying bool, seen map[Type]struct{ } } if !exists { - return NoConversion + return NoConversion, nil } - return conversionKind + return conversionKind, nil } func (t *UnionType) String() string { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax/utilities.go index 7b04311..22c066d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax/utilities.go @@ -1,6 +1,8 @@ package syntax -import 
"github.com/hashicorp/hcl/v2/hclsyntax" +import ( + "github.com/hashicorp/hcl/v2/hclsyntax" +) // None is an HCL syntax node that can be used when a syntax node is required but none is appropriate. var None hclsyntax.Node = &hclsyntax.Body{} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/internal/tstypes/tstypes.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/internal/tstypes/tstypes.go new file mode 100644 index 0000000..8717d14 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/internal/tstypes/tstypes.go @@ -0,0 +1,222 @@ +// Copyright 2016-2021, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Helper code to assist emitting correctly minimally parenthesized +// TypeScript type literals. +package tstypes + +import ( + "bytes" +) + +// Supported types include type identifiers, arrays `T[]`, unions +// `A|B`, and maps with string keys. +type TypeAst interface { + depth() int +} + +// Produces a TypeScript type literal for the type, with minimally +// inserted parentheses. +func TypeLiteral(ast TypeAst) string { + tokens := (&typeScriptTypeUnparser{}).unparse(ast) + return toLiteral(tokens) +} + +// Builds a type identifier (possibly qualified such as +// "my.module.MyType") or a primitive such as "boolean". +func Identifier(id string) TypeAst { + return &idType{id} +} + +// Builds a `T[]` type from a `T` type. 
+func Array(t TypeAst) TypeAst { + return &arrayType{t} +} + +// Builds a `{[key: string]: T}` type from a `T` type. +func StringMap(t TypeAst) TypeAst { + return &mapType{t} +} + +// Builds a union `A | B | C` type. +func Union(t ...TypeAst) TypeAst { + if len(t) == 0 { + panic("At least one type is needed to form a Union, none are given") + } + if len(t) == 1 { + return t[0] + } + return &unionType{t[0], t[1], t[2:]} +} + +// Normalizes by unnesting unions `A | (B | C) => A | B | C`. +func Normalize(ast TypeAst) TypeAst { + return transform(ast, func(t TypeAst) TypeAst { + switch v := t.(type) { + case *unionType: + var all []TypeAst + for _, e := range v.all() { + switch ev := e.(type) { + case *unionType: + all = append(all, ev.all()...) + default: + all = append(all, ev) + } + } + return Union(all...) + default: + return t + } + }) +} + +func transform(t TypeAst, f func(x TypeAst) TypeAst) TypeAst { + switch v := t.(type) { + case *unionType: + var ts []TypeAst + for _, x := range v.all() { + ts = append(ts, transform(x, f)) + } + return f(Union(ts...)) + case *arrayType: + return f(&arrayType{transform(v.arrayElement, f)}) + case *mapType: + return f(&mapType{transform(v.mapElement, f)}) + default: + return f(t) + } +} + +type idType struct { + id string +} + +func (*idType) depth() int { + return 1 +} + +var _ TypeAst = &idType{} + +type mapType struct { + mapElement TypeAst +} + +func (t *mapType) depth() int { + return t.mapElement.depth() + 1 +} + +var _ TypeAst = &mapType{} + +type arrayType struct { + arrayElement TypeAst +} + +func (t *arrayType) depth() int { + return t.arrayElement.depth() + 1 +} + +var _ TypeAst = &arrayType{} + +type unionType struct { + t1 TypeAst + t2 TypeAst + tRest []TypeAst +} + +func (t *unionType) all() []TypeAst { + return append([]TypeAst{t.t1, t.t2}, t.tRest...) 
+} + +func (t *unionType) depth() int { + var maxDepth = 0 + for _, t := range t.all() { + d := t.depth() + if d > maxDepth { + maxDepth = d + } + } + return maxDepth +} + +var _ TypeAst = &unionType{} + +type typeTokenKind string + +const ( + openParen typeTokenKind = "(" + closeParen = ")" + openMap = "{[key: string]: " + closeMap = "}" + identifier = "x" + array = "[]" + union = " | " +) + +type typeToken struct { + kind typeTokenKind + value string +} + +type typeScriptTypeUnparser struct{} + +func (u typeScriptTypeUnparser) unparse(ast TypeAst) []typeToken { + switch v := ast.(type) { + case *idType: + return []typeToken{{identifier, v.id}} + case *arrayType: + return append(u.unparseWithUnionParens(v.arrayElement), typeToken{array, ""}) + case *mapType: + return append([]typeToken{{openMap, ""}}, append(u.unparse(v.mapElement), typeToken{closeMap, ""})...) + case *unionType: + var tokens []typeToken + for i, t := range v.all() { + if i > 0 { + tokens = append(tokens, typeToken{union, ""}) + } + tokens = append(tokens, u.unparseWithUnionParens(t)...) + } + return tokens + default: + panic("Unknown object of type typeAst") + } +} + +func (u typeScriptTypeUnparser) unparseWithUnionParens(ast TypeAst) []typeToken { + var parens bool + switch ast.(type) { + case *unionType: + parens = true + } + tokens := u.unparse(ast) + if parens { + return u.parenthesize(tokens) + } + return tokens +} + +func (u typeScriptTypeUnparser) parenthesize(tokens []typeToken) []typeToken { + return append([]typeToken{{openParen, ""}}, append(tokens, typeToken{closeParen, ""})...) 
+} + +func toLiteral(tokens []typeToken) string { + var buffer bytes.Buffer + for _, t := range tokens { + if t.value != "" { + buffer.WriteString(t.value) + } else { + buffer.WriteString(string(t.kind)) + } + } + return buffer.String() +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/doc.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/doc.go index 9fc0552..74299dc 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/doc.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/doc.go @@ -69,12 +69,18 @@ func (d DocLanguageHelper) GetDocLinkForFunctionInputOrOutputType(pkg *schema.Pa } // GetLanguageTypeString returns the language-specific type given a Pulumi schema type. -func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, args, optional bool) string { +func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input bool) string { + // Remove the union with `undefined` for optional types, + // since we will show that information separately anyway. + if optional, ok := t.(*schema.OptionalType); ok { + t = optional.ElementType + } + modCtx := &modContext{ pkg: pkg, mod: moduleName, } - typeName := modCtx.typeString(t, input, false /*wrapInput*/, args, optional, nil) + typeName := modCtx.typeString(t, input, nil) // Remove any package qualifiers from the type name. typeQualifierPackage := "inputs" @@ -84,11 +90,6 @@ func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName typeName = strings.ReplaceAll(typeName, typeQualifierPackage+".", "") typeName = strings.ReplaceAll(typeName, "enums.", "") - // Remove the union with `undefined` for optional types, - // since we will show that information separately anyway. 
- if optional { - typeName = strings.ReplaceAll(typeName, " | undefined", "?") - } return typeName } @@ -103,6 +104,25 @@ func (d DocLanguageHelper) GetResourceFunctionResultName(modName string, f *sche return title(funcName) + "Result" } +func (d DocLanguageHelper) GetMethodName(m *schema.Method) string { + return camel(m.Name) +} + +func (d DocLanguageHelper) GetMethodResultName(pkg *schema.Package, modName string, r *schema.Resource, + m *schema.Method) string { + + if info, ok := pkg.Language["nodejs"].(NodePackageInfo); ok { + if info.LiftSingleValueMethodReturns && m.Function.Outputs != nil && len(m.Function.Outputs.Properties) == 1 { + modCtx := &modContext{ + pkg: pkg, + mod: modName, + } + return modCtx.typeString(m.Function.Outputs.Properties[0].Type, false, nil) + } + } + return fmt.Sprintf("%s.%sResult", resourceName(r), title(d.GetMethodName(m))) +} + // GetPropertyName returns the property name specific to NodeJS. func (d DocLanguageHelper) GetPropertyName(p *schema.Property) (string, error) { return p.Name, nil diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen.go index 789937c..27f2b3d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,6 +21,7 @@ package nodejs import ( "bytes" "encoding/json" + "errors" "fmt" "io" "path" @@ -31,19 +32,30 @@ import ( "strings" "unicode" - "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v3/codegen" + "github.com/pulumi/pulumi/pkg/v3/codegen/internal/tstypes" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" + + "github.com/pulumi/pulumi/sdk/v3/go/common/diag" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) type typeDetails struct { outputType bool inputType bool - argsType bool - plainType bool + + usedInFunctionOutputVersionInputs bool // helps decide naming under the tfbridge20 flag } +// title capitalizes the first rune in s. +// +// Examples: +// "hello" => "Hello" +// "hiAlice" => "HiAlice" +// "hi.Bob" => "Hi.Bob" +// +// Note: This is expected to work on strings which are not valid identifiers. func title(s string) string { if s == "" { return "" @@ -52,6 +64,13 @@ func title(s string) string { return string(append([]rune{unicode.ToUpper(runes[0])}, runes[1:]...)) } +// camel converts s to camel case. +// +// Examples: +// "helloWorld" => "helloWorld" +// "HelloWorld" => "helloWorld" +// "JSONObject" => "jsonobject" +// "My-FRIEND.Bob" => "my-FRIEND.Bob" func camel(s string) string { if s == "" { return "" @@ -68,6 +87,40 @@ func camel(s string) string { return string(res) } +// pascal converts s to pascal case. Word breaks are signified by illegal +// identifier runes (excluding '.'). These are found by use of +// isLegalIdentifierPart. +// +// Examples: +// "My-Friend.Bob" => "MyFriend.Bob" +// "JSONObject" => "JSONObject"' +// "a-glad-dayTime" => "AGladDayTime" +// +// Note: because camel aggressively down-cases the first continuous sub-string +// of uppercase characters, we cannot define pascal as title(camel(x)). +func pascal(s string) string { + split := [][]rune{{}} + runes := []rune(s) + for _, r := range runes { + if !isLegalIdentifierPart(r) && r != '.' 
{ + split = append(split, []rune{}) + } else { + split[len(split)-1] = append(split[len(split)-1], r) + } + } + words := make([]string, len(split)) + for i, v := range split { + words[i] = title(string(v)) + } + return strings.Join(words, "") +} + +// externalModuleName Formats the name of package to comply with an external +// module. +func externalModuleName(s string) string { + return fmt.Sprintf("pulumi%s", pascal(s)) +} + type modContext struct { pkg *schema.Package mod string @@ -84,6 +137,9 @@ type modContext struct { modToPkg map[string]string // Module name -> package name compatibility string // Toggle compatibility mode for a specified target. disableUnionOutputTypes bool // Disable unions in output types. + + // Determine whether to lift single-value method return values + liftSingleValueMethodReturns bool } func (mod *modContext) String() string { @@ -138,7 +194,8 @@ func (mod *modContext) namingContext(pkg *schema.Package) (namingCtx *modContext return } -func (mod *modContext) objectType(pkg *schema.Package, tok string, input, args, enum bool) string { +func (mod *modContext) objectType(pkg *schema.Package, details *typeDetails, tok string, input, args, enum bool) string { + root := "outputs." if input { root = "inputs." @@ -146,6 +203,7 @@ func (mod *modContext) objectType(pkg *schema.Package, tok string, input, args, namingCtx, pkgName, external := mod.namingContext(pkg) if external { + pkgName = externalModuleName(pkgName) root = "types.output." if input { root = "types.input." @@ -158,15 +216,22 @@ func (mod *modContext) objectType(pkg *schema.Package, tok string, input, args, return "enums." 
+ modName + title(name) } - if args && mod.compatibility != tfbridge20 && mod.compatibility != kubernetes20 { + if args && input && details != nil && details.usedInFunctionOutputVersionInputs { + name += "Args" + } else if args && namingCtx.compatibility != tfbridge20 && namingCtx.compatibility != kubernetes20 { name += "Args" } + return pkgName + root + modName + title(name) } func (mod *modContext) resourceType(r *schema.ResourceType) string { if strings.HasPrefix(r.Token, "pulumi:providers:") { pkgName := strings.TrimPrefix(r.Token, "pulumi:providers:") + if pkgName != mod.pkg.Name { + pkgName = externalModuleName(pkgName) + } + return fmt.Sprintf("%s.Provider", pkgName) } @@ -174,7 +239,10 @@ func (mod *modContext) resourceType(r *schema.ResourceType) string { if r.Resource != nil { pkg = r.Resource.Package } - namingCtx, pkgName, _ := mod.namingContext(pkg) + namingCtx, pkgName, external := mod.namingContext(pkg) + if external { + pkgName = externalModuleName(pkgName) + } modName, name := namingCtx.tokenToModName(r.Token), tokenToName(r.Token) @@ -206,74 +274,93 @@ func tokenToFunctionName(tok string) string { return camel(tokenToName(tok)) } -func (mod *modContext) typeString(t schema.Type, input, wrapInput, args, optional bool, constValue interface{}) string { - var typ string +func (mod *modContext) typeAst(t schema.Type, input bool, constValue interface{}) tstypes.TypeAst { switch t := t.(type) { + case *schema.OptionalType: + return tstypes.Union( + mod.typeAst(t.ElementType, input, constValue), + tstypes.Identifier("undefined"), + ) + case *schema.InputType: + typ := mod.typeString(codegen.SimplifyInputUnion(t.ElementType), input, constValue) + if typ == "any" { + return tstypes.Identifier("any") + } + return tstypes.Identifier(fmt.Sprintf("pulumi.Input<%s>", typ)) case *schema.EnumType: - typ = mod.objectType(nil, t.Token, input, args, true) + return tstypes.Identifier(mod.objectType(nil, nil, t.Token, input, false, true)) case *schema.ArrayType: - typ 
= mod.typeString(t.ElementType, input, wrapInput, args, false, constValue) + "[]" + return tstypes.Array(mod.typeAst(t.ElementType, input, constValue)) case *schema.MapType: - typ = fmt.Sprintf("{[key: string]: %v}", mod.typeString(t.ElementType, input, wrapInput, args, false, constValue)) + return tstypes.StringMap(mod.typeAst(t.ElementType, input, constValue)) case *schema.ObjectType: - typ = mod.objectType(t.Package, t.Token, input, args, false) + details := mod.details(t) + return tstypes.Identifier(mod.objectType(t.Package, details, t.Token, input, t.IsInputShape(), false)) case *schema.ResourceType: - typ = mod.resourceType(t) + return tstypes.Identifier(mod.resourceType(t)) case *schema.TokenType: - typ = tokenToName(t.Token) + return tstypes.Identifier(tokenToName(t.Token)) case *schema.UnionType: if !input && mod.disableUnionOutputTypes { if t.DefaultType != nil { - return mod.typeString(t.DefaultType, input, wrapInput, args, optional, constValue) + return mod.typeAst(t.DefaultType, input, constValue) } - typ = "any" - } else { - var elements []string - for _, e := range t.ElementTypes { - t := mod.typeString(e, input, wrapInput, args, false, constValue) - if args && strings.HasPrefix(t, "pulumi.Input<") { - contract.Assert(t[len(t)-1] == '>') - // Strip off the leading `pulumi.Input<` and the trailing `>` - t = t[len("pulumi.Input<") : len(t)-1] - } - elements = append(elements, t) - } - typ = strings.Join(elements, " | ") + return tstypes.Identifier("any") } + + elements := make([]tstypes.TypeAst, len(t.ElementTypes)) + for i, e := range t.ElementTypes { + elements[i] = mod.typeAst(e, input, constValue) + } + return tstypes.Union(elements...) 
default: switch t { case schema.BoolType: - typ = "boolean" + return tstypes.Identifier("boolean") case schema.IntType, schema.NumberType: - typ = "number" + return tstypes.Identifier("number") case schema.StringType: - typ = "string" + if constValue != nil { + return tstypes.Identifier(fmt.Sprintf("%q", constValue.(string))) + } + return tstypes.Identifier("string") case schema.ArchiveType: - typ = "pulumi.asset.Archive" + return tstypes.Identifier("pulumi.asset.Archive") case schema.AssetType: - typ = "pulumi.asset.Asset | pulumi.asset.Archive" + return tstypes.Union( + tstypes.Identifier("pulumi.asset.Asset"), + tstypes.Identifier("pulumi.asset.Archive"), + ) case schema.JSONType: fallthrough case schema.AnyType: - typ = "any" + return tstypes.Identifier("any") } } + panic(fmt.Errorf("unexpected type %T", t)) +} - if constValue != nil && typ == "string" { - typ = fmt.Sprintf("%q", constValue.(string)) - } - if wrapInput && typ != "any" { - typ = fmt.Sprintf("pulumi.Input<%s>", typ) - } - if optional { - return typ + " | undefined" - } - return typ +func (mod *modContext) typeString(t schema.Type, input bool, constValue interface{}) string { + return tstypes.TypeLiteral(tstypes.Normalize(mod.typeAst(t, input, constValue))) } func isStringType(t schema.Type) bool { - for tt, ok := t.(*schema.TokenType); ok; tt, ok = t.(*schema.TokenType) { - t = tt.UnderlyingType + t = codegen.UnwrapType(t) + + switch typ := t.(type) { + case *schema.TokenType: + t = typ.UnderlyingType + case *schema.EnumType: + t = typ.ElementType + case *schema.UnionType: + // The following case detects for relaxed string enums. If it's a Union, check if one ElementType is an EnumType. + // If yes, t is the ElementType of the EnumType. 
+ for _, tt := range typ.ElementTypes { + t = codegen.UnwrapType(tt) + if typ, ok := t.(*schema.EnumType); ok { + t = typ.ElementType + } + } } return t == schema.StringType @@ -309,7 +396,11 @@ func printComment(w io.Writer, comment, deprecationMessage, indent string) { fmt.Fprintf(w, "%s */\n", indent) } -func (mod *modContext) genPlainType(w io.Writer, name, comment string, properties []*schema.Property, input, arg, readonly bool, level int) { +// Generates a plain interface type. +// +// We use this to represent both argument and plain object types. +func (mod *modContext) genPlainType(w io.Writer, name, comment string, + properties []*schema.Property, input, readonly bool, level int) error { indent := strings.Repeat(" ", level) printComment(w, comment, "", indent) @@ -323,15 +414,97 @@ func (mod *modContext) genPlainType(w io.Writer, name, comment string, propertie prefix = "readonly " } - sigil := "" - if !p.IsRequired { - sigil = "?" + sigil, propertyType := "", p.Type + if !p.IsRequired() { + sigil, propertyType = "?", codegen.RequiredType(p) } - typ := mod.typeString(p.Type, input, arg && !p.IsPlain, arg && !p.IsPlain, false, p.ConstValue) + typ := mod.typeString(propertyType, input, p.ConstValue) fmt.Fprintf(w, "%s %s%s%s: %s;\n", indent, prefix, p.Name, sigil, typ) } fmt.Fprintf(w, "%s}\n", indent) + return nil +} + +// Generate a provide defaults function for an associated plain object. +func (mod *modContext) genPlainObjectDefaultFunc(w io.Writer, name string, + properties []*schema.Property, input, readonly bool, level int) error { + indent := strings.Repeat(" ", level) + defaults := []string{} + for _, p := range properties { + + if p.DefaultValue != nil { + dv, err := mod.getDefaultValue(p.DefaultValue, codegen.UnwrapType(p.Type)) + if err != nil { + return err + } + defaults = append(defaults, fmt.Sprintf("%s: (val.%s) ?? 
%s", p.Name, p.Name, dv)) + } else if funcName := mod.provideDefaultsFuncName(p.Type, input); funcName != "" { + // ProvideDefaults functions have the form `(Input | undefined) -> + // Output | undefined`. We need to disallow the undefined. This is safe + // because val.%arg existed in the input (type system enforced). + var compositeObject string + if codegen.IsNOptionalInput(p.Type) { + compositeObject = fmt.Sprintf("pulumi.output(val.%s).apply(%s)", p.Name, funcName) + } else { + compositeObject = fmt.Sprintf("%s(val.%s)", funcName, p.Name) + } + if !p.IsRequired() { + compositeObject = fmt.Sprintf("(val.%s ? %s : undefined)", p.Name, compositeObject) + } + defaults = append(defaults, fmt.Sprintf("%s: %s", p.Name, compositeObject)) + } + } + + // There are no defaults, so don't generate a default function. + if len(defaults) == 0 { + return nil + } + // Generates a function header that looks like this: + // export function %sProvideDefaults(val: pulumi.Input<%s> | undefined): pulumi.Output<%s> | undefined { + // const def = (val: LayeredTypeArgs) => ({ + // ...val, + defaultProvderName := provideDefaultsFuncNameFromName(name) + printComment(w, fmt.Sprintf("%s sets the appropriate defaults for %s", + defaultProvderName, name), "", indent) + fmt.Fprintf(w, "%sexport function %s(val: %s): "+ + "%s {\n", indent, defaultProvderName, name, name) + fmt.Fprintf(w, "%s return {\n", indent) + fmt.Fprintf(w, "%s ...val,\n", indent) + + // Fields look as follows + // %s: (val.%s) ?? devValue, + for _, val := range defaults { + fmt.Fprintf(w, "%s %s,\n", indent, val) + } + fmt.Fprintf(w, "%s };\n", indent) + fmt.Fprintf(w, "%s}\n", indent) + return nil +} + +// The name of the helper function used to provide default values to plain +// types, derived purely from the name of the enclosing type. Prefer to use +// provideDefaultsFuncName when full type information is available. 
+func provideDefaultsFuncNameFromName(typeName string) string { + var i int + if in := strings.LastIndex(typeName, "."); in != -1 { + i = in + } + // path + camel(name) + ProvideDefaults suffix + return typeName[:i] + camel(typeName[i:]) + "ProvideDefaults" +} + +// The name of the function used to set defaults on the plain type. +// +// `type` is the type which the function applies to. +// `input` indicates whither `type` is an input type. +func (mod *modContext) provideDefaultsFuncName(typ schema.Type, input bool) string { + if !codegen.IsProvideDefaultsFuncRequired(typ) { + return "" + } + requiredType := codegen.UnwrapType(typ) + typeName := mod.typeString(requiredType, input, nil) + return provideDefaultsFuncNameFromName(typeName) } func tsPrimitiveValue(value interface{}) (string, error) { @@ -355,7 +528,7 @@ func tsPrimitiveValue(value interface{}) (string, error) { case reflect.String: return fmt.Sprintf("%q", v.String()), nil default: - return "", errors.Errorf("unsupported default value of type %T", value) + return "", fmt.Errorf("unsupported default value of type %T", value) } } @@ -391,7 +564,7 @@ func (mod *modContext) getDefaultValue(dv *schema.DefaultValue, t schema.Type) ( } cast := "" - if t != schema.StringType { + if t != schema.StringType && getType == "" { cast = "" } @@ -508,7 +681,7 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { allOptionalInputs := true for _, prop := range r.InputProperties { ins.Add(prop.Name) - allOptionalInputs = allOptionalInputs && !prop.IsRequired + allOptionalInputs = allOptionalInputs && !prop.IsRequired() } for _, prop := range r.Properties { printComment(w, prop.Comment, prop.DeprecationMessage, " ") @@ -519,11 +692,11 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { outcomment = "/*out*/ " } - required := prop.IsRequired + propertyType := prop.Type if mod.compatibility == kubernetes20 { - required = true + propertyType = codegen.RequiredType(prop) } - 
fmt.Fprintf(w, " public %sreadonly %s!: pulumi.Output<%s>;\n", outcomment, prop.Name, mod.typeString(prop.Type, false, false, false, !required, prop.ConstValue)) + fmt.Fprintf(w, " public %sreadonly %s!: pulumi.Output<%s>;\n", outcomment, prop.Name, mod.typeString(propertyType, false, prop.ConstValue)) } fmt.Fprintf(w, "\n") @@ -566,7 +739,7 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { genInputProps := func() error { for _, prop := range r.InputProperties { - if prop.IsRequired { + if prop.IsRequired() { fmt.Fprintf(w, " if ((!args || args.%s === undefined) && !opts.urn) {\n", prop.Name) fmt.Fprintf(w, " throw new Error(\"Missing required property '%s'\");\n", prop.Name) fmt.Fprintf(w, " }\n") @@ -574,10 +747,24 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { } for _, prop := range r.InputProperties { var arg string + applyDefaults := func(arg string) string { + if name := mod.provideDefaultsFuncName(prop.Type, true /*input*/); name != "" { + var body string + if codegen.IsNOptionalInput(prop.Type) { + body = fmt.Sprintf("pulumi.output(%[2]s).apply(%[1]s)", name, arg) + } else { + body = fmt.Sprintf("%s(%s)", name, arg) + } + return fmt.Sprintf("(%s ? %s : undefined)", arg, body) + } + return arg + } + + argValue := applyDefaults(fmt.Sprintf("args.%s", prop.Name)) if prop.Secret { - arg = fmt.Sprintf("args?.%[1]s ? pulumi.secret(args.%[1]s) : undefined", prop.Name) + arg = fmt.Sprintf("args?.%[1]s ? pulumi.secret(%[2]s) : undefined", prop.Name, argValue) } else { - arg = fmt.Sprintf("args ? args.%[1]s : undefined", prop.Name) + arg = fmt.Sprintf("args ? 
%[1]s : undefined", argValue) } prefix := " " @@ -589,7 +776,7 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { arg = cv } else { if prop.DefaultValue != nil { - dv, err := mod.getDefaultValue(prop.DefaultValue, prop.Type) + dv, err := mod.getDefaultValue(prop.DefaultValue, codegen.UnwrapType(prop.Type)) if err != nil { return err } @@ -602,13 +789,13 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { arg = fmt.Sprintf("pulumi.output(%s).apply(JSON.stringify)", arg) } } - fmt.Fprintf(w, "%sinputs[\"%s\"] = %s;\n", prefix, prop.Name, arg) + fmt.Fprintf(w, "%sresourceInputs[\"%s\"] = %s;\n", prefix, prop.Name, arg) } for _, prop := range r.Properties { prefix := " " if !ins.Has(prop.Name) { - fmt.Fprintf(w, "%sinputs[\"%s\"] = undefined /*out*/;\n", prefix, prop.Name) + fmt.Fprintf(w, "%sresourceInputs[\"%s\"] = undefined /*out*/;\n", prefix, prop.Name) } } @@ -630,7 +817,7 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { if r.DeprecationMessage != "" && mod.compatibility != kubernetes20 { fmt.Fprintf(w, " pulumi.log.warn(\"%s is deprecated: %s\")\n", name, r.DeprecationMessage) } - fmt.Fprintf(w, " let inputs: pulumi.Inputs = {};\n") + fmt.Fprintf(w, " let resourceInputs: pulumi.Inputs = {};\n") fmt.Fprintf(w, " opts = opts || {};\n") if r.StateInputs != nil { @@ -638,7 +825,7 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { fmt.Fprintf(w, " if (opts.id) {\n") fmt.Fprintf(w, " const state = argsOrState as %[1]s | undefined;\n", stateType) for _, prop := range r.StateInputs.Properties { - fmt.Fprintf(w, " inputs[\"%[1]s\"] = state ? state.%[1]s : undefined;\n", prop.Name) + fmt.Fprintf(w, " resourceInputs[\"%[1]s\"] = state ? 
state.%[1]s : undefined;\n", prop.Name) } // The creation case (with args): fmt.Fprintf(w, " } else {\n") @@ -657,11 +844,11 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { // The get case: fmt.Fprintf(w, " } else {\n") for _, prop := range r.Properties { - fmt.Fprintf(w, " inputs[\"%[1]s\"] = undefined /*out*/;\n", prop.Name) + fmt.Fprintf(w, " resourceInputs[\"%[1]s\"] = undefined /*out*/;\n", prop.Name) } } } else { - fmt.Fprintf(w, " let inputs: pulumi.Inputs = {};\n") + fmt.Fprintf(w, " let resourceInputs: pulumi.Inputs = {};\n") fmt.Fprintf(w, " opts = opts || {};\n") fmt.Fprintf(w, " {\n") err := genInputProps() @@ -678,9 +865,9 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { fmt.Fprintf(w, " }\n") // If the caller didn't request a specific version, supply one using the version of this library. - fmt.Fprintf(w, " if (!opts.version) {\n") - fmt.Fprintf(w, " opts = pulumi.mergeOptions(opts, { version: utilities.getVersion()});\n") - fmt.Fprintf(w, " }\n") + // If a `pluginDownloadURL` was supplied by the generating schema, we supply a default facility + // much like for version. Both operations are handled in the utilities library. + fmt.Fprint(w, " opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);\n") // Now invoke the super constructor with the type, name, and a property map. 
if len(r.Aliases) > 0 { @@ -700,32 +887,181 @@ func (mod *modContext) genResource(w io.Writer, r *schema.Resource) error { fmt.Fprintf(w, "\n opts = pulumi.mergeOptions(opts, secretOpts);\n") } + replaceOnChanges, errList := r.ReplaceOnChanges() + for _, err := range errList { + cmdutil.Diag().Warningf(&diag.Diag{Message: err.Error()}) + } + replaceOnChangesStrings := schema.PropertyListJoinToString(replaceOnChanges, + func(x string) string { return x }) + if len(replaceOnChanges) > 0 { + fmt.Fprintf(w, ` const replaceOnChanges = { replaceOnChanges: ["%s"] };`, strings.Join(replaceOnChangesStrings, `", "`)) + fmt.Fprintf(w, "\n opts = pulumi.mergeOptions(opts, replaceOnChanges);\n") + } + // If it's a ComponentResource, set the remote option. if r.IsComponent { - fmt.Fprintf(w, " super(%s.__pulumiType, name, inputs, opts, true /*remote*/);\n", name) + fmt.Fprintf(w, " super(%s.__pulumiType, name, resourceInputs, opts, true /*remote*/);\n", name) } else { - fmt.Fprintf(w, " super(%s.__pulumiType, name, inputs, opts);\n", name) + fmt.Fprintf(w, " super(%s.__pulumiType, name, resourceInputs, opts);\n", name) } - // Finish the class. fmt.Fprintf(w, " }\n") + + // Generate methods. + genMethod := func(method *schema.Method) { + methodName := camel(method.Name) + fun := method.Function + + shouldLiftReturn := mod.liftSingleValueMethodReturns && fun.Outputs != nil && len(fun.Outputs.Properties) == 1 + + // Write the TypeDoc/JSDoc for the data source function. + fmt.Fprint(w, "\n") + printComment(w, codegen.FilterExamples(fun.Comment, "typescript"), fun.DeprecationMessage, " ") + + // Now, emit the method signature. + var args []*schema.Property + var argsig string + argsOptional := true + if fun.Inputs != nil { + // Filter out the __self__ argument from the inputs. 
+ args = make([]*schema.Property, 0, len(fun.Inputs.InputShape.Properties)) + for _, arg := range fun.Inputs.InputShape.Properties { + if arg.Name == "__self__" { + continue + } + if arg.IsRequired() { + argsOptional = false + } + args = append(args, arg) + } + + if len(args) > 0 { + optFlag := "" + if argsOptional { + optFlag = "?" + } + argsig = fmt.Sprintf("args%s: %s.%sArgs", optFlag, name, title(method.Name)) + } + } + var retty string + if fun.Outputs == nil { + retty = "void" + } else if shouldLiftReturn { + retty = fmt.Sprintf("pulumi.Output<%s>", mod.typeString(fun.Outputs.Properties[0].Type, false, nil)) + } else { + retty = fmt.Sprintf("pulumi.Output<%s.%sResult>", name, title(method.Name)) + } + fmt.Fprintf(w, " %s(%s): %s {\n", methodName, argsig, retty) + if fun.DeprecationMessage != "" { + fmt.Fprintf(w, " pulumi.log.warn(\"%s.%s is deprecated: %s\")\n", name, methodName, + fun.DeprecationMessage) + } + + // Zero initialize the args if empty and necessary. + if len(args) > 0 && argsOptional { + fmt.Fprintf(w, " args = args || {};\n") + } + + // Now simply call the runtime function with the arguments, returning the results. + var ret string + if fun.Outputs != nil { + if shouldLiftReturn { + ret = fmt.Sprintf("const result: pulumi.Output<%s.%sResult> = ", name, title(method.Name)) + } else { + ret = "return " + } + } + fmt.Fprintf(w, " %spulumi.runtime.call(\"%s\", {\n", ret, fun.Token) + if fun.Inputs != nil { + for _, p := range fun.Inputs.InputShape.Properties { + // Pass the argument to the invocation. + if p.Name == "__self__" { + fmt.Fprintf(w, " \"%s\": this,\n", p.Name) + } else { + fmt.Fprintf(w, " \"%[1]s\": args.%[1]s,\n", p.Name) + } + } + } + fmt.Fprintf(w, " }, this);\n") + if shouldLiftReturn { + fmt.Fprintf(w, " return result.%s;\n", camel(fun.Outputs.Properties[0].Name)) + } + fmt.Fprintf(w, " }\n") + } + for _, method := range r.Methods { + genMethod(method) + } + + // Finish the class. 
fmt.Fprintf(w, "}\n") // Emit the state type for get methods. if r.StateInputs != nil { fmt.Fprintf(w, "\n") - mod.genPlainType(w, stateType, r.StateInputs.Comment, r.StateInputs.Properties, true, true, false, 0) + if err := mod.genPlainType(w, stateType, r.StateInputs.Comment, r.StateInputs.Properties, true, false, 0); err != nil { + return err + } } // Emit the argument type for construction. fmt.Fprintf(w, "\n") argsComment := fmt.Sprintf("The set of arguments for constructing a %s resource.", name) - mod.genPlainType(w, argsType, argsComment, r.InputProperties, true, true, false, 0) - + if err := mod.genPlainType(w, argsType, argsComment, r.InputProperties, true, false, 0); err != nil { + return err + } + + // Emit any method types inside a namespace merged with the class, to represent types nested in the class. + // https://www.typescriptlang.org/docs/handbook/declaration-merging.html#merging-namespaces-with-classes + genMethodTypes := func(w io.Writer, method *schema.Method) error { + fun := method.Function + methodName := title(method.Name) + if fun.Inputs != nil { + args := make([]*schema.Property, 0, len(fun.Inputs.InputShape.Properties)) + for _, arg := range fun.Inputs.InputShape.Properties { + if arg.Name == "__self__" { + continue + } + args = append(args, arg) + } + if len(args) > 0 { + comment := fun.Inputs.Comment + if comment == "" { + comment = fmt.Sprintf("The set of arguments for the %s.%s method.", name, method.Name) + } + if err := mod.genPlainType(w, methodName+"Args", comment, args, true, false, 1); err != nil { + return err + } + fmt.Fprintf(w, "\n") + } + } + if fun.Outputs != nil { + comment := fun.Inputs.Comment + if comment == "" { + comment = fmt.Sprintf("The results of the %s.%s method.", name, method.Name) + } + if err := mod.genPlainType(w, methodName+"Result", comment, fun.Outputs.Properties, false, true, 1); err != nil { + return err + } + fmt.Fprintf(w, "\n") + } + return nil + } + types := &bytes.Buffer{} + for _, method := 
range r.Methods { + if err := genMethodTypes(types, method); err != nil { + return err + } + } + typesString := types.String() + if typesString != "" { + fmt.Fprintf(w, "\nexport namespace %s {\n", name) + fmt.Fprintf(w, typesString) + fmt.Fprintf(w, "}\n") + } return nil } -func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) { +func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) error { name := tokenToFunctionName(fun.Token) // Write the TypeDoc/JSDoc for the data source function. @@ -737,28 +1073,16 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) { // Now, emit the function signature. var argsig string - argsOptional := true + argsOptional := functionArgsOptional(fun) if fun.Inputs != nil { - for _, p := range fun.Inputs.Properties { - if p.IsRequired { - argsOptional = false - break - } - } - optFlag := "" if argsOptional { optFlag = "?" } argsig = fmt.Sprintf("args%s: %sArgs, ", optFlag, title(name)) } - var retty string - if fun.Outputs == nil { - retty = "void" - } else { - retty = title(name) + "Result" - } - fmt.Fprintf(w, "export function %s(%sopts?: pulumi.InvokeOptions): Promise<%s> {\n", name, argsig, retty) + fmt.Fprintf(w, "export function %s(%sopts?: pulumi.InvokeOptions): Promise<%s> {\n", + name, argsig, functionReturnType(fun)) if fun.DeprecationMessage != "" && mod.compatibility != kubernetes20 { fmt.Fprintf(w, " pulumi.log.warn(\"%s is deprecated: %s\")\n", name, fun.DeprecationMessage) } @@ -773,16 +1097,23 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) { fmt.Fprintf(w, " opts = {}\n") fmt.Fprintf(w, " }\n") fmt.Fprintf(w, "\n") - fmt.Fprintf(w, " if (!opts.version) {\n") - fmt.Fprintf(w, " opts.version = utilities.getVersion();\n") - fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, " opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);\n") // Now simply invoke the runtime function with the arguments, returning the results. 
fmt.Fprintf(w, " return pulumi.runtime.invoke(\"%s\", {\n", fun.Token) if fun.Inputs != nil { for _, p := range fun.Inputs.Properties { // Pass the argument to the invocation. - fmt.Fprintf(w, " \"%[1]s\": args.%[1]s,\n", p.Name) + body := fmt.Sprintf("args.%s", p.Name) + if name := mod.provideDefaultsFuncName(p.Type, true /*input*/); name != "" { + if codegen.IsNOptionalInput(p.Type) { + body = fmt.Sprintf("pulumi.output(%s).apply(%s)", body, name) + } else { + body = fmt.Sprintf("%s(%s)", name, body) + } + body = fmt.Sprintf("args.%s ? %s : undefined", p.Name, body) + } + fmt.Fprintf(w, " \"%[1]s\": %[2]s,\n", p.Name, body) } } fmt.Fprintf(w, " }, opts);\n") @@ -791,23 +1122,82 @@ func (mod *modContext) genFunction(w io.Writer, fun *schema.Function) { // If there are argument and/or return types, emit them. if fun.Inputs != nil { fmt.Fprintf(w, "\n") - mod.genPlainType(w, title(name)+"Args", fun.Inputs.Comment, fun.Inputs.Properties, true, false, false, 0) + if err := mod.genPlainType(w, title(name)+"Args", fun.Inputs.Comment, fun.Inputs.Properties, true, false, 0); err != nil { + return err + } } if fun.Outputs != nil { fmt.Fprintf(w, "\n") - mod.genPlainType(w, title(name)+"Result", fun.Outputs.Comment, fun.Outputs.Properties, false, false, true, 0) + if err := mod.genPlainType(w, title(name)+"Result", fun.Outputs.Comment, fun.Outputs.Properties, false, true, 0); err != nil { + return err + } } + + return mod.genFunctionOutputVersion(w, fun) } -func visitObjectTypes(properties []*schema.Property, visitor func(*schema.ObjectType, bool)) { - codegen.VisitTypeClosure(properties, func(t codegen.Type) { - if o, ok := t.Type.(*schema.ObjectType); ok { - visitor(o, t.Plain) +func functionArgsOptional(fun *schema.Function) bool { + if fun.Inputs != nil { + for _, p := range fun.Inputs.Properties { + if p.IsRequired() { + return false + } + } + } + return true +} + +func functionReturnType(fun *schema.Function) string { + if fun.Outputs == nil { + return "void" + } + 
return title(tokenToFunctionName(fun.Token)) + "Result" +} + +// Generates `function ${fn}Output(..)` version lifted to work on +// `Input`-warpped arguments and producing an `Output`-wrapped result. +func (mod *modContext) genFunctionOutputVersion(w io.Writer, fun *schema.Function) error { + if !fun.NeedsOutputVersion() { + return nil + } + + originalName := tokenToFunctionName(fun.Token) + fnOutput := fmt.Sprintf("%sOutput", originalName) + argTypeName := fmt.Sprintf("%sArgs", title(fnOutput)) + + var argsig string + argsOptional := functionArgsOptional(fun) + optFlag := "" + if argsOptional { + optFlag = "?" + } + argsig = fmt.Sprintf("args%s: %s, ", optFlag, argTypeName) + + fmt.Fprintf(w, ` +export function %s(%sopts?: pulumi.InvokeOptions): pulumi.Output<%s> { + return pulumi.output(args).apply(a => %s(a, opts)) +} +`, fnOutput, argsig, functionReturnType(fun), originalName) + fmt.Fprintf(w, "\n") + + return mod.genPlainType(w, + argTypeName, + fun.Inputs.Comment, + fun.Inputs.InputShape.Properties, + true, /* input */ + false, /* readonly */ + 0 /* level */) +} + +func visitObjectTypes(properties []*schema.Property, visitor func(*schema.ObjectType)) { + codegen.VisitTypeClosure(properties, func(t schema.Type) { + if o, ok := t.(*schema.ObjectType); ok { + visitor(o) } }) } -func (mod *modContext) genType(w io.Writer, obj *schema.ObjectType, input bool, level int) { +func (mod *modContext) genType(w io.Writer, obj *schema.ObjectType, input bool, level int) error { properties := obj.Properties info, hasInfo := obj.Language["nodejs"] if hasInfo { @@ -827,33 +1217,43 @@ func (mod *modContext) genType(w io.Writer, obj *schema.ObjectType, input bool, properties = make([]*schema.Property, len(obj.Properties)) for i, p := range obj.Properties { copy := *p + if required.Has(p.Name) { + copy.Type = codegen.RequiredType(©) + } else { + copy.Type = codegen.OptionalType(©) + } properties[i] = © - properties[i].IsRequired = required.Has(p.Name) } } } - name := 
tokenToName(obj.Token) - if mod.compatibility == tfbridge20 || mod.compatibility == kubernetes20 { - wrapInput := input && !mod.details(obj).plainType - mod.genPlainType(w, name, obj.Comment, properties, input, wrapInput, false, level) - return + name := mod.getObjectName(obj, input) + err := mod.genPlainType(w, name, obj.Comment, properties, input, false, level) + if err != nil { + return err } + return mod.genPlainObjectDefaultFunc(w, name, properties, input, false, level) +} - if input { - if mod.details(obj).plainType { - mod.genPlainType(w, name, obj.Comment, properties, true, false, false, level) - } - if mod.details(obj).argsType { - mod.genPlainType(w, name+"Args", obj.Comment, properties, true, true, false, level) - } - return - } +// getObjectName recovers the name of `obj` as a type. +func (mod *modContext) getObjectName(obj *schema.ObjectType, input bool) string { + name := tokenToName(obj.Token) + + details := mod.details(obj) - mod.genPlainType(w, name, obj.Comment, properties, false, false, false, level) + if obj.IsInputShape() && input && details != nil && details.usedInFunctionOutputVersionInputs { + name += "Args" + } else if obj.IsInputShape() && mod.compatibility != tfbridge20 && mod.compatibility != kubernetes20 { + name += "Args" + } + return name } func (mod *modContext) getTypeImports(t schema.Type, recurse bool, externalImports codegen.StringSet, imports map[string]codegen.StringSet, seen codegen.Set) bool { + return mod.getTypeImportsForResource(t, recurse, externalImports, imports, seen, nil) +} + +func (mod *modContext) getTypeImportsForResource(t schema.Type, recurse bool, externalImports codegen.StringSet, imports map[string]codegen.StringSet, seen codegen.Set, res *schema.Resource) bool { if seen.Has(t) { return false } @@ -879,7 +1279,24 @@ func (mod *modContext) getTypeImports(t schema.Type, recurse bool, externalImpor return false } + var nodePackageInfo NodePackageInfo + if languageInfo, hasLanguageInfo := 
mod.pkg.Language["nodejs"]; hasLanguageInfo { + nodePackageInfo = languageInfo.(NodePackageInfo) + } + + writeImports := func(pkg string) { + if imp, ok := nodePackageInfo.ProviderNameToModuleName[pkg]; ok { + externalImports.Add(fmt.Sprintf("import * as %s from \"%s\";", externalModuleName(pkg), imp)) + } else { + externalImports.Add(fmt.Sprintf("import * as %s from \"@pulumi/%s\";", externalModuleName(pkg), pkg)) + } + } + switch t := t.(type) { + case *schema.OptionalType: + return mod.getTypeImports(t.ElementType, recurse, externalImports, imports, seen) + case *schema.InputType: + return mod.getTypeImports(t.ElementType, recurse, externalImports, imports, seen) case *schema.ArrayType: return mod.getTypeImports(t.ElementType, recurse, externalImports, imports, seen) case *schema.MapType: @@ -890,7 +1307,7 @@ func (mod *modContext) getTypeImports(t schema.Type, recurse bool, externalImpor // If it's from another package, add an import for the external package. if t.Package != nil && t.Package != mod.pkg { pkg := t.Package.Name - externalImports.Add(fmt.Sprintf("import * as %[1]s from \"@pulumi/%[1]s\";", pkg)) + writeImports(pkg) return false } @@ -902,7 +1319,12 @@ func (mod *modContext) getTypeImports(t schema.Type, recurse bool, externalImpor // If it's from another package, add an import for the external package. if t.Resource != nil && t.Resource.Package != mod.pkg { pkg := t.Resource.Package.Name - externalImports.Add(fmt.Sprintf("import * as %[1]s from \"@pulumi/%[1]s\";", pkg)) + writeImports(pkg) + return false + } + + // Don't import itself. 
+ if t.Resource == res { return false } @@ -921,6 +1343,10 @@ func (mod *modContext) getTypeImports(t schema.Type, recurse bool, externalImpor } func (mod *modContext) getImports(member interface{}, externalImports codegen.StringSet, imports map[string]codegen.StringSet) bool { + return mod.getImportsForResource(member, externalImports, imports, nil) +} + +func (mod *modContext) getImportsForResource(member interface{}, externalImports codegen.StringSet, imports map[string]codegen.StringSet, res *schema.Resource) bool { seen := codegen.Set{} switch member := member.(type) { case *schema.ObjectType: @@ -935,19 +1361,37 @@ func (mod *modContext) getImports(member interface{}, externalImports codegen.St case *schema.Resource: needsTypes := false for _, p := range member.Properties { - needsTypes = mod.getTypeImports(p.Type, false, externalImports, imports, seen) || needsTypes + needsTypes = mod.getTypeImportsForResource(p.Type, false, externalImports, imports, seen, res) || needsTypes } for _, p := range member.InputProperties { - needsTypes = mod.getTypeImports(p.Type, false, externalImports, imports, seen) || needsTypes + needsTypes = mod.getTypeImportsForResource(p.Type, false, externalImports, imports, seen, res) || needsTypes + } + for _, method := range member.Methods { + if method.Function.Inputs != nil { + for _, p := range method.Function.Inputs.Properties { + needsTypes = + mod.getTypeImportsForResource(p.Type, false, externalImports, imports, seen, res) || needsTypes + } + } + if method.Function.Outputs != nil { + for _, p := range method.Function.Outputs.Properties { + needsTypes = + mod.getTypeImportsForResource(p.Type, false, externalImports, imports, seen, res) || needsTypes + } + } } return needsTypes case *schema.Function: needsTypes := false if member.Inputs != nil { - needsTypes = mod.getTypeImports(member.Inputs, false, externalImports, imports, seen) || needsTypes + for _, p := range member.Inputs.Properties { + needsTypes = 
mod.getTypeImports(p.Type, false, externalImports, imports, seen) || needsTypes + } } if member.Outputs != nil { - needsTypes = mod.getTypeImports(member.Outputs, false, externalImports, imports, seen) || needsTypes + for _, p := range member.Outputs.Properties { + needsTypes = mod.getTypeImports(p.Type, false, externalImports, imports, seen) || needsTypes + } } return needsTypes case []*schema.Property: @@ -1003,16 +1447,18 @@ func (mod *modContext) genHeader(w io.Writer, imports []string, externalImports // configGetter returns the name of the config.get* method used for a configuration variable and the cast necessary // for the result of the call, if any. func (mod *modContext) configGetter(v *schema.Property) (string, string) { - if v.Type == schema.StringType { + typ := codegen.RequiredType(v) + + if typ == schema.StringType { return "get", "" } - if tok, ok := v.Type.(*schema.TokenType); ok && tok.UnderlyingType == schema.StringType { - return "get", fmt.Sprintf("<%s>", mod.typeString(v.Type, false, false, false, false, nil)) + if tok, ok := typ.(*schema.TokenType); ok && tok.UnderlyingType == schema.StringType { + return "get", fmt.Sprintf("<%s>", mod.typeString(typ, false, nil)) } // Only try to parse a JSON object if the config isn't a straight string. - return fmt.Sprintf("getObject<%s>", mod.typeString(v.Type, false, false, false, false, nil)), "" + return fmt.Sprintf("getObject<%s>", mod.typeString(typ, false, nil)), "" } func (mod *modContext) genConfig(w io.Writer, variables []*schema.Property) error { @@ -1021,8 +1467,10 @@ func (mod *modContext) genConfig(w io.Writer, variables []*schema.Property) erro mod.genHeader(w, mod.sdkImports(referencesNestedTypes, true), externalImports, imports) + fmt.Fprintf(w, "declare var exports: any;\n") + // Create a config bag for the variables to pull from. 
- fmt.Fprintf(w, "let __config = new pulumi.Config(\"%v\");\n", mod.pkg.Name) + fmt.Fprintf(w, "const __config = new pulumi.Config(\"%v\");\n", mod.pkg.Name) fmt.Fprintf(w, "\n") // Emit an entry for all config variables. @@ -1034,18 +1482,24 @@ func (mod *modContext) genConfig(w io.Writer, variables []*schema.Property) erro configFetch := fmt.Sprintf("%s__config.%s(\"%s\")", cast, getfunc, p.Name) // TODO: handle ConstValues https://github.com/pulumi/pulumi/issues/4755 if p.DefaultValue != nil { - v, err := mod.getDefaultValue(p.DefaultValue, p.Type) + v, err := mod.getDefaultValue(p.DefaultValue, codegen.UnwrapType(p.Type)) if err != nil { return err } - // Note: this logic isn't quite correct, but already exists in all of the TF-based providers. - // Specifically, this doesn't work right if the first value is set to false but the default value - // is true. - configFetch += " || " + v + configFetch += " ?? " + v + } + optType := codegen.OptionalType(p) + if p.DefaultValue != nil && p.DefaultValue.Value != nil { + optType = codegen.RequiredType(p) } - fmt.Fprintf(w, "export let %s: %s = %s;\n", - p.Name, mod.typeString(p.Type, false, false, false, true, nil), configFetch) + fmt.Fprintf(w, "export declare const %s: %s;\n", p.Name, mod.typeString(optType, false, nil)) + fmt.Fprintf(w, "Object.defineProperty(exports, %q, {\n", p.Name) + fmt.Fprintf(w, " get() {\n") + fmt.Fprintf(w, " return %s;\n", configFetch) + fmt.Fprintf(w, " },\n") + fmt.Fprintf(w, " enumerable: true,\n") + fmt.Fprintf(w, "});\n\n") } return nil @@ -1077,23 +1531,40 @@ func (mod *modContext) sdkImports(nested, utilities bool) []string { return imports } -func (mod *modContext) genTypes() (string, string) { +func (mod *modContext) genTypes() (string, string, error) { externalImports, imports := codegen.NewStringSet(), map[string]codegen.StringSet{} + var hasDefaultObjects bool for _, t := range mod.types { + if t.IsOverlay { + // This type is generated by the provider, so no further action is 
required. + continue + } + mod.getImports(t, externalImports, imports) + if codegen.IsProvideDefaultsFuncRequired(t) { + hasDefaultObjects = true + } + } + // Instantiating the default might require an environmental variable. This + // uses utilities. + if hasDefaultObjects { + externalImports.Add(fmt.Sprintf("import * as utilities from \"%s/utilities\";", mod.getRelativePath())) } inputs, outputs := &bytes.Buffer{}, &bytes.Buffer{} - mod.genHeader(inputs, mod.sdkImports(true, false), externalImports, imports) mod.genHeader(outputs, mod.sdkImports(true, false), externalImports, imports) // Build a namespace tree out of the types, then emit them. namespaces := mod.getNamespaces() - mod.genNamespace(inputs, namespaces[""], true, 0) - mod.genNamespace(outputs, namespaces[""], false, 0) + if err := mod.genNamespace(inputs, namespaces[""], true, 0); err != nil { + return "", "", err + } + if err := mod.genNamespace(outputs, namespaces[""], false, 0); err != nil { + return "", "", err + } - return inputs.String(), outputs.String() + return inputs.String(), outputs.String(), nil } type namespace struct { @@ -1130,6 +1601,11 @@ func (mod *modContext) getNamespaces() map[string]*namespace { } for _, t := range mod.types { + if t.IsOverlay { + // This type is generated by the provider, so no further action is required. 
+ continue + } + modName := mod.pkg.TokenToModule(t.Token) if override, ok := mod.modToPkg[modName]; ok { modName = override @@ -1141,7 +1617,7 @@ func (mod *modContext) getNamespaces() map[string]*namespace { return namespaces } -func (mod *modContext) genNamespace(w io.Writer, ns *namespace, input bool, level int) { +func (mod *modContext) genNamespace(w io.Writer, ns *namespace, input bool, level int) error { indent := strings.Repeat(" ", level) sort.Slice(ns.types, func(i, j int) bool { @@ -1152,7 +1628,9 @@ func (mod *modContext) genNamespace(w io.Writer, ns *namespace, input bool, leve }) for i, t := range ns.types { if input && mod.details(t).inputType || !input && mod.details(t).outputType { - mod.genType(w, t, input, level) + if err := mod.genType(w, t, input, level); err != nil { + return err + } if i != len(ns.types)-1 { fmt.Fprintf(w, "\n") } @@ -1164,12 +1642,15 @@ func (mod *modContext) genNamespace(w io.Writer, ns *namespace, input bool, leve }) for i, child := range ns.children { fmt.Fprintf(w, "%sexport namespace %s {\n", indent, child.name) - mod.genNamespace(w, child, input, level+1) + if err := mod.genNamespace(w, child, input, level+1); err != nil { + return err + } fmt.Fprintf(w, "%s}\n", indent) if i != len(ns.children)-1 { fmt.Fprintf(w, "\n") } } + return nil } func (mod *modContext) genEnum(w io.Writer, enum *schema.EnumType) error { @@ -1242,7 +1723,7 @@ func (mod *modContext) gen(fs fs) error { case "": buffer := &bytes.Buffer{} mod.genHeader(buffer, nil, nil, nil) - fmt.Fprintf(buffer, "%s", utilitiesFile) + mod.genUtilitiesFile(buffer) fs.add(path.Join(modDir, "utilities.ts"), buffer.Bytes()) // Ensure that the top-level (provider) module directory contains a README.md file. @@ -1275,8 +1756,13 @@ func (mod *modContext) gen(fs fs) error { // Resources for _, r := range mod.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. 
+ continue + } + externalImports, imports := codegen.NewStringSet(), map[string]codegen.StringSet{} - referencesNestedTypes := mod.getImports(r, externalImports, imports) + referencesNestedTypes := mod.getImportsForResource(r, externalImports, imports, r) buffer := &bytes.Buffer{} mod.genHeader(buffer, mod.sdkImports(referencesNestedTypes, true), externalImports, imports) @@ -1291,13 +1777,20 @@ func (mod *modContext) gen(fs fs) error { // Functions for _, f := range mod.functions { + if f.IsOverlay { + // This function code is generated by the provider, so no further action is required. + continue + } + externalImports, imports := codegen.NewStringSet(), map[string]codegen.StringSet{} referencesNestedTypes := mod.getImports(f, externalImports, imports) buffer := &bytes.Buffer{} mod.genHeader(buffer, mod.sdkImports(referencesNestedTypes, true), externalImports, imports) - mod.genFunction(buffer, f) + if err := mod.genFunction(buffer, f); err != nil { + return err + } fileName := camel(tokenToName(f.Token)) + ".ts" if mod.isReservedSourceFileName(fileName) { @@ -1327,7 +1820,10 @@ func (mod *modContext) gen(fs fs) error { // Nested types if len(mod.types) > 0 { - input, output := mod.genTypes() + input, output, err := mod.genTypes() + if err != nil { + return err + } fs.add(path.Join(modDir, "input.ts"), []byte(input)) fs.add(path.Join(modDir, "output.ts"), []byte(output)) } @@ -1411,7 +1907,12 @@ func (mod *modContext) genIndex(exports []string) string { } fmt.Fprintf(w, "// Export sub-modules:\n") - sorted := children.SortedValues() + directChildren := codegen.NewStringSet() + for _, child := range children.SortedValues() { + directChildren.Add(path.Base(child)) + } + sorted := directChildren.SortedValues() + for _, mod := range sorted { fmt.Fprintf(w, "import * as %[1]s from \"./%[1]s\";\n", mod) } @@ -1441,6 +1942,11 @@ func (mod *modContext) genResourceModule(w io.Writer) { } else { registrations, first := codegen.StringSet{}, true for _, r := range 
mod.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + if r.IsProvider { contract.Assert(provider == nil) provider = r @@ -1463,6 +1969,11 @@ func (mod *modContext) genResourceModule(w io.Writer) { fmt.Fprintf(w, " switch (type) {\n") for _, r := range mod.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + if r.IsProvider { continue } @@ -1535,7 +2046,12 @@ func (mod *modContext) genEnums(buffer *bytes.Buffer, enums []*schema.EnumType) if len(children) > 0 { fmt.Fprintf(buffer, "// Export sub-modules:\n") - sorted := children.SortedValues() + directChildren := codegen.NewStringSet() + for _, child := range children.SortedValues() { + directChildren.Add(path.Base(child)) + } + sorted := directChildren.SortedValues() + for _, mod := range sorted { fmt.Fprintf(buffer, "import * as %[1]s from \"./%[1]s\";\n", mod) } @@ -1558,12 +2074,15 @@ func (mod *modContext) genEnums(buffer *bytes.Buffer, enums []*schema.EnumType) } // genPackageMetadata generates all the non-code metadata required by a Pulumi package. 
-func genPackageMetadata(pkg *schema.Package, info NodePackageInfo, files fs) { - // The generator already emitted Pulumi.yaml, so that leaves two more files to write out: +func genPackageMetadata(pkg *schema.Package, info NodePackageInfo, files fs) error { + // The generator already emitted Pulumi.yaml, so that leaves three more files to write out: // 1) package.json: minimal NPM package metadata // 2) tsconfig.json: instructions for TypeScript compilation + // 3) install-pulumi-plugin.js: plugin install script files.add("package.json", []byte(genNPMPackageMetadata(pkg, info))) files.add("tsconfig.json", []byte(genTypeScriptProjectFile(info, files))) + files.add("scripts/install-pulumi-plugin.js", []byte(genInstallScript(pkg.PluginDownloadURL))) + return nil } type npmPackage struct { @@ -1583,6 +2102,8 @@ type npmPackage struct { } type npmPulumiManifest struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` Resource bool `json:"resource,omitempty"` PluginDownloadURL string `json:"pluginDownloadURL,omitempty"` } @@ -1596,28 +2117,45 @@ func genNPMPackageMetadata(pkg *schema.Package, info NodePackageInfo) string { devDependencies := map[string]string{} if info.TypeScriptVersion != "" { devDependencies["typescript"] = info.TypeScriptVersion + } else { + devDependencies["typescript"] = "^4.3.5" + } + + version := "${VERSION}" + versionSet := pkg.Version != nil && info.RespectSchemaVersion + if versionSet { + version = pkg.Version.String() + } + + pluginVersion := info.PluginVersion + if versionSet && pluginVersion == "" { + pluginVersion = version + } + + scriptVersion := "${VERSION}" + if pluginVersion != "" { + scriptVersion = pluginVersion } // Create info that will get serialized into an NPM package.json. 
npminfo := npmPackage{ Name: packageName, - Version: "${VERSION}", + Version: version, Description: info.PackageDescription, Keywords: pkg.Keywords, Homepage: pkg.Homepage, Repository: pkg.Repository, License: pkg.License, - // Ideally, this `scripts` section would include an install script that installs the provider, however, doing - // so causes problems when we try to restore package dependencies, since we must do an install for that. So - // we have another process that adds the install script when generating the package.json that we actually - // publish. Scripts: map[string]string{ - "build": "tsc", + "build": "tsc", + "install": fmt.Sprintf("node scripts/install-pulumi-plugin.js resource %s %s", pkg.Name, scriptVersion), }, DevDependencies: devDependencies, Pulumi: npmPulumiManifest{ Resource: true, PluginDownloadURL: pkg.PluginDownloadURL, + Name: info.PluginName, + Version: pluginVersion, }, } @@ -1661,7 +2199,7 @@ func genNPMPackageMetadata(pkg *schema.Package, info NodePackageInfo) string { // Now write out the serialized form. npmjson, err := json.MarshalIndent(npminfo, "", " ") contract.Assert(err == nil) - return string(npmjson) + return string(npmjson) + "\n" } func genTypeScriptProjectFile(info NodePackageInfo, files fs) string { @@ -1690,6 +2228,8 @@ func genTypeScriptProjectFile(info NodePackageInfo, files fs) string { tsFiles = append(tsFiles, f) } } + + tsFiles = append(tsFiles, info.ExtraTypeScriptFiles...) sort.Strings(tsFiles) for i, file := range tsFiles { @@ -1706,8 +2246,12 @@ func genTypeScriptProjectFile(info NodePackageInfo, files fs) string { } // generateModuleContextMap groups resources, types, and functions into NodeJS packages. 
-func generateModuleContextMap(tool string, pkg *schema.Package, info NodePackageInfo, - extraFiles map[string][]byte) (map[string]*modContext, NodePackageInfo, error) { +func generateModuleContextMap(tool string, pkg *schema.Package, extraFiles map[string][]byte, +) (map[string]*modContext, NodePackageInfo, error) { + if err := pkg.ImportLanguages(map[string]schema.Language{"nodejs": Importer}); err != nil { + return nil, NodePackageInfo{}, err + } + info, _ := pkg.Language["nodejs"].(NodePackageInfo) // group resources, types, and functions into NodeJS packages modules := map[string]*modContext{} @@ -1720,12 +2264,13 @@ func generateModuleContextMap(tool string, pkg *schema.Package, info NodePackage mod, ok := modules[modName] if !ok { mod = &modContext{ - pkg: pkg, - mod: modName, - tool: tool, - compatibility: info.Compatibility, - modToPkg: info.ModuleToPackage, - disableUnionOutputTypes: info.DisableUnionOutputTypes, + pkg: pkg, + mod: modName, + tool: tool, + compatibility: info.Compatibility, + modToPkg: info.ModuleToPackage, + disableUnionOutputTypes: info.DisableUnionOutputTypes, + liftSingleValueMethodReturns: info.LiftSingleValueMethodReturns, } if modName != "" { @@ -1755,31 +2300,27 @@ func generateModuleContextMap(tool string, pkg *schema.Package, info NodePackage _ = getMod("config") } - visitObjectTypes(pkg.Config, func(t *schema.ObjectType, plain bool) { + visitObjectTypes(pkg.Config, func(t *schema.ObjectType) { types.details(t).outputType = true }) scanResource := func(r *schema.Resource) { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. 
+ return + } + mod := getModFromToken(r.Token) mod.resources = append(mod.resources, r) - visitObjectTypes(r.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(r.Properties, func(t *schema.ObjectType) { types.details(t).outputType = true }) - visitObjectTypes(r.InputProperties, func(t *schema.ObjectType, plain bool) { - if r.IsProvider { - types.details(t).outputType = true - } + visitObjectTypes(r.InputProperties, func(t *schema.ObjectType) { types.details(t).inputType = true - if plain { - types.details(t).plainType = true - } else { - types.details(t).argsType = true - } }) if r.StateInputs != nil { - visitObjectTypes(r.StateInputs.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(r.StateInputs.Properties, func(t *schema.ObjectType) { types.details(t).inputType = true - types.details(t).argsType = true }) } } @@ -1793,17 +2334,27 @@ func generateModuleContextMap(tool string, pkg *schema.Package, info NodePackage // from function inputs and outputs, including types that have already been visited. 
for _, f := range pkg.Functions { mod := getModFromToken(f.Token) - mod.functions = append(mod.functions, f) + if !f.IsMethod { + mod.functions = append(mod.functions, f) + } if f.Inputs != nil { - visitObjectTypes(f.Inputs.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(f.Inputs.Properties, func(t *schema.ObjectType) { types.details(t).inputType = true - types.details(t).plainType = true }) + + if f.NeedsOutputVersion() { + visitObjectTypes(f.Inputs.InputShape.Properties, func(t *schema.ObjectType) { + for _, mod := range []*modContext{types, getModFromToken(t.Token)} { + det := mod.details(t) + det.inputType = true + det.usedInFunctionOutputVersionInputs = true + } + }) + } } if f.Outputs != nil { - visitObjectTypes(f.Outputs.Properties, func(t *schema.ObjectType, _ bool) { + visitObjectTypes(f.Outputs.Properties, func(t *schema.ObjectType) { types.details(t).outputType = true - types.details(t).plainType = true }) } } @@ -1818,9 +2369,11 @@ func generateModuleContextMap(tool string, pkg *schema.Package, info NodePackage case *schema.ObjectType: types.types = append(types.types, typ) case *schema.EnumType: - info.ContainsEnums = true - mod := getModFromToken(typ.Token) - mod.enums = append(mod.enums, typ) + if !typ.IsOverlay { + info.ContainsEnums = true + mod := getModFromToken(typ.Token) + mod.enums = append(mod.enums, typ) + } default: continue } @@ -1870,18 +2423,18 @@ type LanguageProperty struct { func LanguageResources(pkg *schema.Package) (map[string]LanguageResource, error) { resources := map[string]LanguageResource{} - if err := pkg.ImportLanguages(map[string]schema.Language{"nodejs": Importer}); err != nil { - return nil, err - } - info, _ := pkg.Language["nodejs"].(NodePackageInfo) - - modules, _, err := generateModuleContextMap("", pkg, info, nil) + modules, _, err := generateModuleContextMap("", pkg, nil) if err != nil { return nil, err } for modName, mod := range modules { for _, r := range mod.resources { + if r.IsOverlay { + 
// This resource code is generated by the provider, so no further action is required. + continue + } + packagePath := strings.Replace(modName, "/", ".", -1) lr := LanguageResource{ Resource: r, @@ -1893,9 +2446,9 @@ func LanguageResources(pkg *schema.Package) (map[string]LanguageResource, error) Name: p.Name, } if p.ConstValue != nil { - lp.ConstValue = mod.typeString(p.Type, false, false, false, false, p.ConstValue) + lp.ConstValue = mod.typeString(p.Type, false, p.ConstValue) } else { - lp.Package = mod.typeString(p.Type, false, false, false, false, nil) + lp.Package = mod.typeString(p.Type, false, nil) } lr.Properties = append(lr.Properties, lp) } @@ -1907,13 +2460,7 @@ func LanguageResources(pkg *schema.Package) (map[string]LanguageResource, error) } func GeneratePackage(tool string, pkg *schema.Package, extraFiles map[string][]byte) (map[string][]byte, error) { - // Decode node-specific info - if err := pkg.ImportLanguages(map[string]schema.Language{"nodejs": Importer}); err != nil { - return nil, err - } - info, _ := pkg.Language["nodejs"].(NodePackageInfo) - - modules, info, err := generateModuleContextMap(tool, pkg, info, extraFiles) + modules, info, err := generateModuleContextMap(tool, pkg, extraFiles) if err != nil { return nil, err } @@ -1930,11 +2477,14 @@ func GeneratePackage(tool string, pkg *schema.Package, extraFiles map[string][]b } // Finally emit the package metadata (NPM, TypeScript, and so on). 
- genPackageMetadata(pkg, info, files) + if err = genPackageMetadata(pkg, info, files); err != nil { + return nil, err + } return files, nil } -const utilitiesFile = ` +func (mod *modContext) genUtilitiesFile(w io.Writer) { + const body = ` export function getEnv(...vars: string[]): string | undefined { for (const v of vars) { const value = process.env[v]; @@ -1980,4 +2530,51 @@ export function getVersion(): string { } return version; } + +/** @internal */ +export function resourceOptsDefaults(): any { + return { version: getVersion()%s }; +} +` + var pluginDownloadURL string + if url := mod.pkg.PluginDownloadURL; url != "" { + pluginDownloadURL = fmt.Sprintf(", pluginDownloadURL: %q", url) + } + _, err := fmt.Fprintf(w, body, pluginDownloadURL) + contract.AssertNoError(err) +} + +func genInstallScript(pluginDownloadURL string) string { + const installScript = `"use strict"; +var childProcess = require("child_process"); + +var args = process.argv.slice(2); + +if (args.indexOf("${VERSION}") !== -1) { + process.exit(0); +} + +var res = childProcess.spawnSync("pulumi", ["plugin", "install"%s].concat(args), { + stdio: ["ignore", "inherit", "inherit"] +}); + +if (res.error && res.error.code === "ENOENT") { + console.error("\nThere was an error installing the resource provider plugin. " + + "It looks like ` + "`pulumi`" + ` is not installed on your system. " + + "Please visit https://pulumi.com/ to install the Pulumi CLI.\n" + + "You may try manually installing the plugin by running " + + "` + "`" + `pulumi plugin install " + args.join(" ") + "` + "`" + `"); +} else if (res.error || res.status !== 0) { + console.error("\nThere was an error installing the resource provider plugin. 
" + + "You may try to manually installing the plugin by running " + + "` + "`" + `pulumi plugin install " + args.join(" ") + "` + "`" + `"); +} + +process.exit(0); ` + server := "" + if pluginDownloadURL != "" { + server = fmt.Sprintf(`, "--server", %q`, pluginDownloadURL) + } + return fmt.Sprintf(installScript, server) +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_intrinsics.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_intrinsics.go index 500b179..118f5db 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_intrinsics.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_intrinsics.go @@ -14,7 +14,9 @@ package nodejs -import "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" +import ( + "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" +) const ( // intrinsicAwait is the name of the await intrinsic. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program.go index ca0cb57..605a8ed 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program.go @@ -24,10 +24,10 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" @@ -37,22 +37,25 @@ type generator struct { // The formatter to use when generating code. 
*format.Formatter - program *hcl2.Program + program *pcl.Program diagnostics hcl.Diagnostics asyncMain bool configCreated bool } -func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, error) { +func GenerateProgram(program *pcl.Program) (map[string][]byte, hcl.Diagnostics, error) { // Linearize the nodes into an order appropriate for procedural code generation. - nodes := hcl2.Linearize(program) + nodes := pcl.Linearize(program) g := &generator{ program: program, } g.Formatter = format.NewFormatter(g) + // Creating a list to store and later print helper methods if they turn out to be needed + preambleHelperMethods := codegen.NewStringSet() + for _, p := range program.Packages() { if err := p.ImportLanguages(map[string]schema.Language{"nodejs": Importer}); err != nil { return nil, nil, err @@ -60,9 +63,9 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, } var index bytes.Buffer - g.genPreamble(&index, program) + g.genPreamble(&index, program, preambleHelperMethods) for _, n := range nodes { - if r, ok := n.(*hcl2.Resource); ok && requiresAsyncMain(r) { + if r, ok := n.(*pcl.Resource); ok && requiresAsyncMain(r) { g.asyncMain = true break } @@ -82,7 +85,7 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, if g.asyncMain { var result *model.ObjectConsExpression for _, n := range nodes { - if o, ok := n.(*hcl2.OutputVariable); ok { + if o, ok := n.(*pcl.OutputVariable); ok { if result == nil { result = &model.ObjectConsExpression{} } @@ -150,7 +153,7 @@ func (g *generator) genComment(w io.Writer, comment syntax.Comment) { } } -func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { +func (g *generator) genPreamble(w io.Writer, program *pcl.Program, preambleHelperMethods codegen.StringSet) { // Print the @pulumi/pulumi import at the top. 
g.Fprintln(w, `import * as pulumi from "@pulumi/pulumi";`) @@ -158,14 +161,25 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { // later on. importSet := codegen.NewStringSet("@pulumi/pulumi") for _, n := range program.Nodes { - if r, isResource := n.(*hcl2.Resource); isResource { + if r, isResource := n.(*pcl.Resource); isResource { pkg, _, _, _ := r.DecomposeToken() - importSet.Add("@pulumi/" + pkg) + pkgName := "@pulumi/" + pkg + if r.Schema != nil && r.Schema.Package != nil { + if info, ok := r.Schema.Package.Language["nodejs"].(NodePackageInfo); ok && info.PackageName != "" { + pkgName = info.PackageName + } + } + importSet.Add(pkgName) } diags := n.VisitExpressions(nil, func(n model.Expression) (model.Expression, hcl.Diagnostics) { if call, ok := n.(*model.FunctionCallExpression); ok { - if i := g.getFunctionImports(call); i != "" { - importSet.Add(i) + if i := g.getFunctionImports(call); len(i) > 0 && i[0] != "" { + for _, importPackage := range i { + importSet.Add(importPackage) + } + } + if helperMethodBody, ok := getHelperMethodIfNeeded(call.Name); ok { + preambleHelperMethods.Add(helperMethodBody) } } return n, nil @@ -179,7 +193,7 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { continue } as := makeValidIdentifier(path.Base(pkg)) - if as != pkg { + if as != pkg || pkg == "crypto" { imports = append(imports, fmt.Sprintf("import * as %v from \"%v\";", as, pkg)) } else { imports = append(imports, fmt.Sprintf("import * from \"%v\";", pkg)) @@ -192,22 +206,27 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { g.Fprintln(w, i) } g.Fprint(w, "\n") + + // If we collected any helper methods that should be added, write them just before the main func + for _, preambleHelperMethodBody := range preambleHelperMethods.SortedValues() { + g.Fprintf(w, "%s\n\n", preambleHelperMethodBody) + } } -func (g *generator) genNode(w io.Writer, n hcl2.Node) { +func (g *generator) genNode(w io.Writer, n pcl.Node) 
{ switch n := n.(type) { - case *hcl2.Resource: + case *pcl.Resource: g.genResource(w, n) - case *hcl2.ConfigVariable: + case *pcl.ConfigVariable: g.genConfigVariable(w, n) - case *hcl2.LocalVariable: + case *pcl.LocalVariable: g.genLocalVariable(w, n) - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.genOutputVariable(w, n) } } -func requiresAsyncMain(r *hcl2.Resource) bool { +func requiresAsyncMain(r *pcl.Resource) bool { if r.Options == nil || r.Options.Range == nil { return false } @@ -216,7 +235,7 @@ func requiresAsyncMain(r *hcl2.Resource) bool { } // resourceTypeName computes the NodeJS package, module, and type name for the given resource. -func resourceTypeName(r *hcl2.Resource) (string, string, string, hcl.Diagnostics) { +func resourceTypeName(r *pcl.Resource) (string, string, string, hcl.Diagnostics) { // Compute the resource type from the Pulumi type token. pkg, module, member, diagnostics := r.DecomposeToken() if pkg == "pulumi" && module == "providers" { @@ -234,7 +253,8 @@ func resourceTypeName(r *hcl2.Resource) (string, string, string, hcl.Diagnostics } } - return makeValidIdentifier(pkg), strings.Replace(module, "/", ".", -1), title(member), diagnostics + module = strings.ToLower(strings.Replace(module, "/", ".", -1)) + return makeValidIdentifier(pkg), module, title(member), diagnostics } // makeResourceName returns the expression that should be emitted for a resource's "name" parameter given its base name @@ -246,7 +266,7 @@ func (g *generator) makeResourceName(baseName, count string) string { return fmt.Sprintf("`%s-${%s}`", baseName, count) } -func (g *generator) genResourceOptions(opts *hcl2.ResourceOptions) string { +func (g *generator) genResourceOptions(opts *pcl.ResourceOptions) string { if opts == nil { return "" } @@ -292,7 +312,7 @@ func (g *generator) genResourceOptions(opts *hcl2.ResourceOptions) string { } // genResource handles the generation of instantiations of non-builtin resources. 
-func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { +func (g *generator) genResource(w io.Writer, r *pcl.Resource) { pkg, module, memberName, diagnostics := resourceTypeName(r) g.diagnostics = append(g.diagnostics, diagnostics...) @@ -384,7 +404,7 @@ func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { g.genTrivia(w, r.Definition.Tokens.GetCloseBrace()) } -func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { +func (g *generator) genConfigVariable(w io.Writer, v *pcl.ConfigVariable) { // TODO(pdg): trivia if !g.configCreated { @@ -414,12 +434,12 @@ func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { g.Fgenf(w, ";\n") } -func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { +func (g *generator) genLocalVariable(w io.Writer, v *pcl.LocalVariable) { // TODO(pdg): trivia g.Fgenf(w, "%sconst %s = %.3v;\n", g.Indent, v.Name(), g.lowerExpression(v.Definition.Value)) } -func (g *generator) genOutputVariable(w io.Writer, v *hcl2.OutputVariable) { +func (g *generator) genOutputVariable(w io.Writer, v *pcl.OutputVariable) { // TODO(pdg): trivia export := "export " if g.asyncMain { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_expressions.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_expressions.go index 49ea6f9..ef1e3e0 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_expressions.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_expressions.go @@ -9,8 +9,8 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" @@ -27,8 +27,8 @@ func (g *generator) lowerExpression(expr 
model.Expression) model.Expression { if g.asyncMain { expr = g.awaitInvokes(expr) } - expr = hcl2.RewritePropertyReferences(expr) - expr, _ = hcl2.RewriteApplies(expr, nameInfo(0), !g.asyncMain) + expr = pcl.RewritePropertyReferences(expr) + expr, _ = pcl.RewriteApplies(expr, nameInfo(0), !g.asyncMain) expr, _ = g.lowerProxyApplies(expr) return expr } @@ -176,7 +176,7 @@ func (g *generator) GenForExpression(w io.Writer, expr *model.ForExpression) { func (g *generator) genApply(w io.Writer, expr *model.FunctionCallExpression) { // Extract the list of outputs and the continuation expression from the `__apply` arguments. - applyArgs, then := hcl2.ParseApplyCall(expr) + applyArgs, then := pcl.ParseApplyCall(expr) // If all of the arguments are promises, use promise methods. If any argument is an output, convert all other args // to outputs and use output methods. @@ -214,7 +214,7 @@ func functionName(tokenArg model.Expression) (string, string, string, hcl.Diagno tokenRange := tokenArg.SyntaxNode().Range() // Compute the resource type from the Pulumi type token. 
- pkg, module, member, diagnostics := hcl2.DecomposeToken(token, tokenRange) + pkg, module, member, diagnostics := pcl.DecomposeToken(token, tokenRange) return pkg, strings.Replace(module, "/", ".", -1), member, diagnostics } @@ -266,34 +266,37 @@ func (g *generator) genRange(w io.Writer, call *model.FunctionCallExpression, en genSuffix() } -var functionImports = map[string]string{ - intrinsicInterpolate: "@pulumi/pulumi", - "fileArchive": "@pulumi/pulumi", - "fileAsset": "@pulumi/pulumi", - "readFile": "fs", - "readDir": "fs", +var functionImports = map[string][]string{ + intrinsicInterpolate: {"@pulumi/pulumi"}, + "fileArchive": {"@pulumi/pulumi"}, + "fileAsset": {"@pulumi/pulumi"}, + "filebase64": {"fs"}, + "filebase64sha256": {"fs", "crypto"}, + "readFile": {"fs"}, + "readDir": {"fs"}, + "sha1": {"crypto"}, } -func (g *generator) getFunctionImports(x *model.FunctionCallExpression) string { - if x.Name != hcl2.Invoke { +func (g *generator) getFunctionImports(x *model.FunctionCallExpression) []string { + if x.Name != pcl.Invoke { return functionImports[x.Name] } pkg, _, _, diags := functionName(x.Args[0]) contract.Assert(len(diags) == 0) - return "@pulumi/" + pkg + return []string{"@pulumi/" + pkg} } func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionCallExpression) { switch expr.Name { - case hcl2.IntrinsicApply: + case pcl.IntrinsicApply: g.genApply(w, expr) case intrinsicAwait: g.Fgenf(w, "await %.17v", expr.Args[0]) case intrinsicInterpolate: g.Fgen(w, "pulumi.interpolate`") for _, part := range expr.Args { - if lit, ok := part.(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := part.(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { g.Fgen(w, lit.Value.AsString()) } else { g.Fgenf(w, "${%.v}", part) @@ -318,22 +321,32 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(w, "new pulumi.asset.FileArchive(%.v)", expr.Args[0]) 
case "fileAsset": g.Fgenf(w, "new pulumi.asset.FileAsset(%.v)", expr.Args[0]) - case hcl2.Invoke: + case "filebase64": + g.Fgenf(w, "Buffer.from(fs.readFileSync(%v), 'binary').toString('base64')", expr.Args[0]) + case "filebase64sha256": + // Assuming the existence of the following helper method + g.Fgenf(w, "computeFilebase64sha256(%v)", expr.Args[0]) + case pcl.Invoke: pkg, module, fn, diags := functionName(expr.Args[0]) contract.Assert(len(diags) == 0) if module != "" { module = "." + module } + isOut := pcl.IsOutputVersionInvokeCall(expr) name := fmt.Sprintf("%s%s.%s", makeValidIdentifier(pkg), module, fn) - - optionsBag := "" + if isOut { + name = fmt.Sprintf("%sOutput", name) + } + g.Fprintf(w, "%s(", name) + if len(expr.Args) >= 2 { + g.Fgenf(w, "%.v", expr.Args[1]) + } if len(expr.Args) == 3 { - var buf bytes.Buffer - g.Fgenf(&buf, ", %.v", expr.Args[2]) - optionsBag = buf.String() + g.Fgenf(w, ", %.v", expr.Args[2]) } - - g.Fgenf(w, "%s(%.v%v)", name, expr.Args[1], optionsBag) + g.Fprint(w, ")") + case "join": + g.Fgenf(w, "%.20v.join(%v)", expr.Args[1], expr.Args[0]) case "length": g.Fgenf(w, "%.20v.length", expr.Args[0]) case "lookup": @@ -351,8 +364,13 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(w, "pulumi.secret(%v)", expr.Args[0]) case "split": g.Fgenf(w, "%.20v.split(%v)", expr.Args[1], expr.Args[0]) + case "toBase64": + g.Fgenf(w, "Buffer.from(%v).toString(\"base64\")", expr.Args[0]) case "toJSON": g.Fgenf(w, "JSON.stringify(%v)", expr.Args[0]) + case "sha1": + g.Fgenf(w, "crypto.createHash('sha1').update(%v).digest('hex')", expr.Args[0]) + default: var rng hcl.Range if expr.Syntax != nil { @@ -408,7 +426,12 @@ func (g *generator) genStringLiteral(w io.Writer, v string) { } func (g *generator) GenLiteralValueExpression(w io.Writer, expr *model.LiteralValueExpression) { - switch expr.Type() { + typ := expr.Type() + if cns, ok := typ.(*model.ConstType); ok { + typ = cns.Type + } + + switch typ { case 
model.BoolType: g.Fgenf(w, "%v", expr.Value.True()) case model.NoneType: @@ -433,7 +456,7 @@ func (g *generator) literalKey(x model.Expression) (string, bool) { strKey := "" switch x := x.(type) { case *model.LiteralValueExpression: - if x.Type() == model.StringType { + if model.StringType.AssignableFrom(x.Type()) { strKey = x.Value.AsString() break } @@ -442,7 +465,7 @@ func (g *generator) literalKey(x model.Expression) (string, bool) { return buf.String(), true case *model.TemplateExpression: if len(x.Parts) == 1 { - if lit, ok := x.Parts[0].(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := x.Parts[0].(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { strKey = lit.Value.AsString() break } @@ -535,7 +558,7 @@ func (g *generator) GenSplatExpression(w io.Writer, expr *model.SplatExpression) func (g *generator) GenTemplateExpression(w io.Writer, expr *model.TemplateExpression) { if len(expr.Parts) == 1 { - if lit, ok := expr.Parts[0].(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := expr.Parts[0].(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { g.GenLiteralValueExpression(w, lit) return } @@ -543,7 +566,7 @@ func (g *generator) GenTemplateExpression(w io.Writer, expr *model.TemplateExpre g.Fgen(w, "`") for _, expr := range expr.Parts { - if lit, ok := expr.(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := expr.(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { g.Fgen(w, lit.Value.AsString()) } else { g.Fgenf(w, "${%.v}", expr) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_lower.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_lower.go index d9017a6..17c37b3 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_lower.go +++ 
b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_lower.go @@ -3,8 +3,8 @@ package nodejs import ( "github.com/hashicorp/hcl/v2" "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -192,12 +192,12 @@ func (g *generator) lowerProxyApplies(expr model.Expression) (model.Expression, rewriter := func(expr model.Expression) (model.Expression, hcl.Diagnostics) { // Ignore the node if it is not a call to the apply intrinsic. apply, ok := expr.(*model.FunctionCallExpression) - if !ok || apply.Name != hcl2.IntrinsicApply { + if !ok || apply.Name != pcl.IntrinsicApply { return expr, nil } // Parse the apply call. - args, then := hcl2.ParseApplyCall(apply) + args, then := pcl.ParseApplyCall(apply) parameters := codegen.Set{} for _, p := range then.Parameters { @@ -231,7 +231,7 @@ func (g *generator) awaitInvokes(x model.Expression) model.Expression { rewriter := func(x model.Expression) (model.Expression, hcl.Diagnostics) { // Ignore the node if it is not a call to invoke. call, ok := x.(*model.FunctionCallExpression) - if !ok || call.Name != hcl2.Invoke { + if !ok || call.Name != pcl.Invoke { return x, nil } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_utils.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_utils.go new file mode 100644 index 0000000..fdf64f6 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/gen_program_utils.go @@ -0,0 +1,17 @@ +package nodejs + +// Provides code for a method which will be placed in the program preamble if deemed +// necessary. Because many tasks in Go such as reading a file require extensive error +// handling, it is much prettier to encapsulate that error handling boilerplate as its +// own function in the preamble. 
+func getHelperMethodIfNeeded(functionName string) (string, bool) { + switch functionName { + case "filebase64sha256": + return `func computeFilebase64sha256(path string) string { + const fileData = Buffer.from(fs.readFileSync(path), 'binary') + return crypto.createHash('sha256').update(fileData).digest('hex') +}`, true + default: + return "", false + } +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/importer.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/importer.go index f0bf83b..1ad06f4 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/importer.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/importer.go @@ -52,6 +52,23 @@ type NodePackageInfo struct { DisableUnionOutputTypes bool `json:"disableUnionOutputTypes,omitempty"` // An indicator for whether the package contains enums. ContainsEnums bool `json:"containsEnums,omitempty"` + // A map allowing you to map the name of a provider to the name of the module encapsulating the provider. + ProviderNameToModuleName map[string]string `json:"providerNameToModuleName,omitempty"` + // The name of the plugin, which might be different from the package name. + PluginName string `json:"pluginName,omitempty"` + // The version of the plugin, which might be different from the version of the package.. + PluginVersion string `json:"pluginVersion,omitempty"` + // Additional files to include in TypeScript compilation. + // These paths are added to the `files` section of the + // generated `tsconfig.json`. A typical use case for this is + // compiling hand-authored unit test files that check the + // generated code. + ExtraTypeScriptFiles []string `json:"extraTypeScriptFiles,omitempty"` + // Determines whether to make single-return-value methods return an output object or the single value. 
+ LiftSingleValueMethodReturns bool `json:"liftSingleValueMethodReturns,omitempty"` + + // Respect the Pkg.Version field in the schema + RespectSchemaVersion bool `json:"respectSchemaVersion,omitempty"` } // NodeObjectInfo contains NodeJS-specific information for an object. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/utilities.go index 4cc3519..b9bad27 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/nodejs/utilities.go @@ -15,12 +15,12 @@ package nodejs import ( + "fmt" "io" "regexp" "strings" "unicode" - "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v3/codegen" ) @@ -109,7 +109,7 @@ func makeSafeEnumName(name, typeName string) (string, error) { // If the name is one illegal character, return an error. if len(safeName) == 1 && !isLegalIdentifierStart(rune(safeName[0])) { - return "", errors.Errorf("enum name %s is not a valid identifier", safeName) + return "", fmt.Errorf("enum name %s is not a valid identifier", safeName) } // Capitalize and make a valid identifier. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder.go similarity index 90% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder.go index 0540d7c..1f173e1 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder.go @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( + "fmt" "os" "sort" @@ -29,9 +30,11 @@ import ( ) type bindOptions struct { - allowMissingVariables bool - loader schema.Loader - packageCache *PackageCache + allowMissingVariables bool + allowMissingProperties bool + skipResourceTypecheck bool + loader schema.Loader + packageCache *PackageCache } func (opts bindOptions) modelOptions() []model.BindOption { @@ -45,7 +48,7 @@ type binder struct { options bindOptions referencedPackages map[string]*schema.Package - typeSchemas map[model.Type]schema.Type + schemaTypes map[schema.Type]model.Type tokens syntax.TokenMap nodes []Node @@ -58,6 +61,14 @@ func AllowMissingVariables(options *bindOptions) { options.allowMissingVariables = true } +func AllowMissingProperties(options *bindOptions) { + options.allowMissingProperties = true +} + +func SkipResourceTypechecking(options *bindOptions) { + options.skipResourceTypecheck = true +} + func PluginHost(host plugin.Host) BindOption { return Loader(schema.NewPluginLoader(host)) } @@ -82,6 +93,9 @@ func BindProgram(files []*syntax.File, opts ...BindOption) (*Program, hcl.Diagno o(&options) } + // TODO: remove this once the latest pulumi-terraform-bridge has been rolled out + options.skipResourceTypecheck = true + if options.loader == nil { cwd, err := os.Getwd() if err != nil { @@ -104,7 +118,7 @@ func BindProgram(files []*syntax.File, opts ...BindOption) (*Program, hcl.Diagno options: options, tokens: syntax.NewTokenMapForFiles(files), referencedPackages: map[string]*schema.Package{}, - typeSchemas: map[model.Type]schema.Type{}, + schemaTypes: map[schema.Type]model.Type{}, root: model.NewRootScope(syntax.None), } @@ -174,6 +188,9 @@ func (b *binder) declareNodes(file *syntax.File) (hcl.Diagnostics, error) { typeExpr, diags := model.BindExpressionText(item.Labels[1], model.TypeScope, item.LabelRanges[1].Start) diagnostics = append(diagnostics, diags...) 
+ if typeExpr == nil { + return diagnostics, fmt.Errorf("cannot bind expression: %v", diagnostics.Error()) + } typ = typeExpr.Type() default: diagnostics = append(diagnostics, labelsErrorf(item, "config variables must have exactly one or two labels")) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_nodes.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_nodes.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_nodes.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_nodes.go index 969afcd..3c3fb62 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_nodes.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_nodes.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_resource.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_resource.go similarity index 97% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_resource.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_resource.go index 62ec0bd..d8be554 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_resource.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_resource.go @@ -13,7 +13,7 @@ // limitations under the License. //nolint: goconst -package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" @@ -84,7 +84,7 @@ func (b *binder) bindResourceTypes(node *Resource) hcl.Diagnostics { node.Token = token // Create input and output types for the schema. 
- inputType := model.InputType(b.schemaTypeToType(&schema.ObjectType{Properties: inputProperties})) + inputType := b.schemaTypeToType(&schema.ObjectType{Properties: inputProperties}) outputProperties := map[string]model.Type{ "id": model.NewOutputType(model.StringType), @@ -264,7 +264,7 @@ func (b *binder) bindResourceBody(node *Resource) hcl.Diagnostics { } // Typecheck the attributes. - if objectType, ok := node.InputType.(*model.ObjectType); ok { + if objectType, ok := node.InputType.(*model.ObjectType); ok && !b.options.skipResourceTypecheck { attrNames := codegen.StringSet{} for _, attr := range node.Inputs { attrNames.Add(attr.Name) @@ -281,7 +281,7 @@ func (b *binder) bindResourceBody(node *Resource) hcl.Diagnostics { for _, k := range codegen.SortedKeys(objectType.Properties) { if !model.IsOptionalType(objectType.Properties[k]) && !attrNames.Has(k) { diagnostics = append(diagnostics, - missingRequiredAttribute(k, node.Definition.Body.Syntax.MissingItemRange())) + missingRequiredAttribute(k, block.Body.Syntax.MissingItemRange())) } } } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_schema.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_schema.go similarity index 84% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_schema.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_schema.go index 23a9dc6..e87098d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/binder_schema.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/binder_schema.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( "fmt" @@ -25,6 +25,7 @@ import ( "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" + "github.com/zclconf/go-cty/cty" ) type packageSchema struct { @@ -145,34 +146,42 @@ func (b *binder) loadReferencedPackageSchemas(n Node) error { // schemaTypeToType converts a schema.Type to a model Type. func (b *binder) schemaTypeToType(src schema.Type) (result model.Type) { - return b.schemaTypeToTypeImpl(src, map[schema.Type]model.Type{}) -} - -func (b *binder) schemaTypeToTypeImpl(src schema.Type, seen map[schema.Type]model.Type) (result model.Type) { - defer func() { - b.typeSchemas[result] = src - }() - - if already, ok := seen[src]; ok { - return already - } - switch src := src.(type) { case *schema.ArrayType: - return model.NewListType(b.schemaTypeToTypeImpl(src.ElementType, seen)) + return model.NewListType(b.schemaTypeToType(src.ElementType)) case *schema.MapType: - return model.NewMapType(b.schemaTypeToTypeImpl(src.ElementType, seen)) + return model.NewMapType(b.schemaTypeToType(src.ElementType)) + case *schema.EnumType: + // TODO(codegen): make this a union of constant types. 
+ return b.schemaTypeToType(src.ElementType) case *schema.ObjectType: + if t, ok := b.schemaTypes[src]; ok { + return t + } + properties := map[string]model.Type{} objType := model.NewObjectType(properties, src) - seen[src] = objType + b.schemaTypes[src] = objType for _, prop := range src.Properties { - t := b.schemaTypeToTypeImpl(prop.Type, seen) - if !prop.IsRequired { - t = model.NewOptionalType(t) + typ := prop.Type + if b.options.allowMissingProperties { + typ = &schema.OptionalType{ElementType: typ} } + + t := b.schemaTypeToType(typ) if prop.ConstValue != nil { - t = model.NewConstType(t, prop.ConstValue) + var value cty.Value + switch v := prop.ConstValue.(type) { + case bool: + value = cty.BoolVal(v) + case float64: + value = cty.NumberFloatVal(v) + case string: + value = cty.StringVal(v) + default: + contract.Failf("unexpected constant type %T", v) + } + t = model.NewConstType(t, value) } properties[prop.Name] = t } @@ -186,14 +195,21 @@ func (b *binder) schemaTypeToTypeImpl(src schema.Type, seen map[schema.Type]mode } if src.UnderlyingType != nil { - underlyingType := b.schemaTypeToTypeImpl(src.UnderlyingType, seen) + underlyingType := b.schemaTypeToType(src.UnderlyingType) return model.NewUnionType(t, underlyingType) } return t + case *schema.InputType: + elementType := b.schemaTypeToType(src.ElementType) + resolvedElementType := b.schemaTypeToType(codegen.ResolvedType(src.ElementType)) + return model.NewUnionTypeAnnotated([]model.Type{elementType, model.NewOutputType(resolvedElementType)}, src) + case *schema.OptionalType: + elementType := b.schemaTypeToType(src.ElementType) + return model.NewOptionalType(elementType) case *schema.UnionType: types := make([]model.Type, len(src.ElementTypes)) for i, src := range src.ElementTypes { - types[i] = b.schemaTypeToTypeImpl(src, seen) + types[i] = b.schemaTypeToType(src) } if src.Discriminator != "" { return model.NewUnionTypeAnnotated(types, src) @@ -256,8 +272,11 @@ func GetSchemaForType(t model.Type) 
(schema.Type, bool) { return GetSchemaForType(t.ElementType) case *model.UnionType: for _, a := range t.Annotations { - if t, ok := a.(*schema.UnionType); ok { - return t, true + switch a := a.(type) { + case *schema.UnionType: + return a, true + case *schema.InputType: + return a, true } } schemas := codegen.Set{} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/component.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/component.go similarity index 98% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/component.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/component.go index f1650b3..2d25767 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/component.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/component.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2/hclsyntax" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/config.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/config.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/config.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/config.go index bc09652..2cb8964 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/config.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/config.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/diagnostics.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/diagnostics.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/diagnostics.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/diagnostics.go index 6b921ca..fbbd4f6 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/diagnostics.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/diagnostics.go @@ -1,4 +1,4 @@ -package hcl2 +package pcl import ( "fmt" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/functions.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/functions.go similarity index 87% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/functions.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/functions.go index b01a9df..5f21af6 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/functions.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/functions.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" @@ -91,6 +91,19 @@ var pulumiBuiltins = map[string]*model.Function{ }}, ReturnType: AssetType, }), + "join": model.NewFunction(model.StaticFunctionSignature{ + Parameters: []model.Parameter{ + { + Name: "separator", + Type: model.StringType, + }, + { + Name: "strings", + Type: model.NewListType(model.StringType), + }, + }, + ReturnType: model.StringType, + }), "length": model.NewFunction(model.GenericFunctionSignature( func(args []model.Expression) (model.StaticFunctionSignature, hcl.Diagnostics) { var diagnostics hcl.Diagnostics @@ -196,6 +209,20 @@ var pulumiBuiltins = map[string]*model.Function{ }}, ReturnType: model.StringType, }), + "filebase64": model.NewFunction(model.StaticFunctionSignature{ + Parameters: []model.Parameter{{ + Name: "path", + Type: model.StringType, + }}, + ReturnType: model.StringType, + }), + "filebase64sha256": model.NewFunction(model.StaticFunctionSignature{ + Parameters: []model.Parameter{{ + Name: "path", + Type: model.StringType, + }}, + ReturnType: model.StringType, + }), "secret": model.NewFunction(model.GenericFunctionSignature( func(args []model.Expression) (model.StaticFunctionSignature, hcl.Diagnostics) { valueType := model.Type(model.DynamicType) @@ -211,6 +238,13 @@ var pulumiBuiltins = map[string]*model.Function{ ReturnType: model.NewOutputType(valueType), }, nil })), + "sha1": model.NewFunction(model.StaticFunctionSignature{ + Parameters: []model.Parameter{{ + Name: "input", + Type: model.StringType, + }}, + ReturnType: model.StringType, + }), "split": model.NewFunction(model.StaticFunctionSignature{ Parameters: []model.Parameter{ { @@ -224,6 +258,13 @@ var pulumiBuiltins = map[string]*model.Function{ }, ReturnType: model.NewListType(model.StringType), }), + "toBase64": model.NewFunction(model.StaticFunctionSignature{ + Parameters: []model.Parameter{{ + Name: "value", + Type: model.StringType, + }}, + ReturnType: model.StringType, + }), "toJSON": 
model.NewFunction(model.StaticFunctionSignature{ Parameters: []model.Parameter{{ Name: "value", diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/intrinsics.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/intrinsics.go similarity index 97% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/intrinsics.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/intrinsics.go index 8002449..74b91eb 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/intrinsics.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/intrinsics.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" @@ -24,8 +24,6 @@ const ( IntrinsicApply = "__apply" // IntrinsicConvert is the name of the conversion intrinsic. IntrinsicConvert = "__convert" - // IntrinsicInput is the name of the input intrinsic. - IntrinsicInput = "__input" ) func isOutput(t model.Type) bool { diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/invoke.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/invoke.go new file mode 100644 index 0000000..cf75c3a --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/invoke.go @@ -0,0 +1,232 @@ +// Copyright 2016-2021, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/schema" + "github.com/zclconf/go-cty/cty" +) + +const Invoke = "invoke" + +func getInvokeToken(call *hclsyntax.FunctionCallExpr) (string, hcl.Range, bool) { + if call.Name != Invoke || len(call.Args) < 1 { + return "", hcl.Range{}, false + } + template, ok := call.Args[0].(*hclsyntax.TemplateExpr) + if !ok || len(template.Parts) != 1 { + return "", hcl.Range{}, false + } + literal, ok := template.Parts[0].(*hclsyntax.LiteralValueExpr) + if !ok { + return "", hcl.Range{}, false + } + if literal.Val.Type() != cty.String { + return "", hcl.Range{}, false + } + return literal.Val.AsString(), call.Args[0].Range(), true +} + +func (b *binder) bindInvokeSignature(args []model.Expression) (model.StaticFunctionSignature, hcl.Diagnostics) { + if len(args) < 1 { + return b.zeroSignature(), nil + } + + template, ok := args[0].(*model.TemplateExpression) + if !ok || len(template.Parts) != 1 { + return b.zeroSignature(), hcl.Diagnostics{tokenMustBeStringLiteral(args[0])} + } + lit, ok := template.Parts[0].(*model.LiteralValueExpression) + if !ok || model.StringType.ConversionFrom(lit.Type()) == model.NoConversion { + return b.zeroSignature(), hcl.Diagnostics{tokenMustBeStringLiteral(args[0])} + } + + token, tokenRange := lit.Value.AsString(), args[0].SyntaxNode().Range() + pkg, _, _, diagnostics := DecomposeToken(token, tokenRange) + if diagnostics.HasErrors() { + return b.zeroSignature(), diagnostics + } + + pkgSchema, ok := b.options.packageCache.entries[pkg] + if !ok { + return b.zeroSignature(), hcl.Diagnostics{unknownPackage(pkg, tokenRange)} + } + + fn, ok := pkgSchema.functions[token] + if !ok { + canon := canonicalizeToken(token, pkgSchema.schema) + if fn, ok = pkgSchema.functions[canon]; ok { + token, lit.Value = canon, cty.StringVal(canon) + } + } + if !ok { + 
return b.zeroSignature(), hcl.Diagnostics{unknownFunction(token, tokenRange)} + } + + if len(args) < 2 { + return b.zeroSignature(), hcl.Diagnostics{errorf(tokenRange, "missing second arg")} + } + sig, err := b.signatureForArgs(fn, args[1]) + if err != nil { + diag := hcl.Diagnostics{errorf(tokenRange, "Invoke binding error: %v", err)} + return b.zeroSignature(), diag + } + + return sig, nil +} + +func (b *binder) makeSignature(argsType, returnType model.Type) model.StaticFunctionSignature { + return model.StaticFunctionSignature{ + Parameters: []model.Parameter{ + { + Name: "token", + Type: model.StringType, + }, + { + Name: "args", + Type: argsType, + }, + { + Name: "provider", + Type: model.NewOptionalType(model.StringType), + }, + }, + ReturnType: returnType, + } +} + +func (b *binder) zeroSignature() model.StaticFunctionSignature { + return b.makeSignature(model.NewOptionalType(model.DynamicType), model.DynamicType) +} + +func (b *binder) signatureForArgs(fn *schema.Function, args model.Expression) (model.StaticFunctionSignature, error) { + if args != nil && b.useOutputVersion(fn, args) { + return b.outputVersionSignature(fn) + } + return b.regularSignature(fn), nil +} + +// Heuristic to decide when to use `fnOutput` form of a function. Will +// conservatively prefer `false`. It only decides to return `true` if +// doing so avoids the need to introduce an `apply` form to +// accommodate `Output` args (`Promise` args do not count). +func (b *binder) useOutputVersion(fn *schema.Function, args model.Expression) bool { + if !fn.NeedsOutputVersion() { + // No code emitted for an `fnOutput` form, impossible. 
+ return false + } + + outputFormParamType := b.schemaTypeToType(fn.Inputs.InputShape) + regularFormParamType := b.schemaTypeToType(fn.Inputs) + argsType := args.Type() + + if regularFormParamType.ConversionFrom(argsType) == model.NoConversion && + outputFormParamType.ConversionFrom(argsType) == model.SafeConversion && + model.ContainsOutputs(argsType) { + return true + } + + return false +} + +func (b *binder) regularSignature(fn *schema.Function) model.StaticFunctionSignature { + var argsType model.Type + if fn.Inputs == nil { + argsType = model.NewOptionalType(model.NewObjectType(map[string]model.Type{})) + } else { + argsType = b.schemaTypeToType(fn.Inputs) + } + + var returnType model.Type + if fn.Outputs == nil { + returnType = model.NewObjectType(map[string]model.Type{}) + } else { + returnType = b.schemaTypeToType(fn.Outputs) + } + + return b.makeSignature(argsType, model.NewPromiseType(returnType)) +} + +func (b *binder) outputVersionSignature(fn *schema.Function) (model.StaticFunctionSignature, error) { + if !fn.NeedsOutputVersion() { + return model.StaticFunctionSignature{}, fmt.Errorf("Function %s does not have an Output version", fn.Token) + } + + // Given `fn.NeedsOutputVersion()==true`, can assume `fn.Inputs != nil`, `fn.Outputs != nil`. + argsType := b.schemaTypeToType(fn.Inputs.InputShape) + returnType := b.schemaTypeToType(fn.Outputs) + return b.makeSignature(argsType, model.NewOutputType(returnType)), nil +} + +// Detects invoke calls that use an output version of a function. +func IsOutputVersionInvokeCall(call *model.FunctionCallExpression) bool { + if call.Name == Invoke { + // Currently binder.bindInvokeSignature will assign + // either DynamicType, a Promise, or an Output + // for the return type of an invoke. Output implies + // that an output version has been picked. 
+ _, returnsOutput := call.Signature.ReturnType.(*model.OutputType) + return returnsOutput + } + return false +} + +// Pattern matches to recognize `__convert(objCons(..))` pattern that +// is used to annotate object constructors with appropriate nominal +// types. If the expression matches, returns true followed by the +// constructor expression and the appropriate type. +func RecognizeTypedObjectCons(theExpr model.Expression) (bool, *model.ObjectConsExpression, model.Type) { + expr, isFunc := theExpr.(*model.FunctionCallExpression) + if !isFunc { + return false, nil, nil + } + + if expr.Name != IntrinsicConvert { + return false, nil, nil + } + + if len(expr.Args) != 1 { + return false, nil, nil + } + + objCons, isObjCons := expr.Args[0].(*model.ObjectConsExpression) + if !isObjCons { + return false, nil, nil + } + + return true, objCons, expr.Type() +} + +// Pattern matches to recognize an encoded call to an output-versioned +// invoke, such as `invoke(token, __convert(objCons(..)))`. If +// matching, returns the `args` expression and its schema-bound type. +func RecognizeOutputVersionedInvoke( + expr *model.FunctionCallExpression, +) (bool, *model.ObjectConsExpression, model.Type) { + if !IsOutputVersionInvokeCall(expr) { + return false, nil, nil + } + + if len(expr.Args) < 2 { + return false, nil, nil + } + + return RecognizeTypedObjectCons(expr.Args[1]) +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/local.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/local.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/local.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/local.go index b8cc582..793c376 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/local.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/local.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/output.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/output.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/output.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/output.go index 0c43f8b..47e87d5 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/output.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/output.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/program.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/program.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/program.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/program.go index 1ff33ae..a362e09 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/program.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/program.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "io" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/resource.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/resource.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/resource.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/resource.go index b2295af..35af4f4 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/resource.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/resource.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( "github.com/hashicorp/hcl/v2" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_apply.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_apply.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_apply.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_apply.go index 22b46e0..9f7ba25 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_apply.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_apply.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "fmt" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_convert.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_convert.go similarity index 98% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_convert.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_convert.go index beb4f8e..19fbac3 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_convert.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_convert.go @@ -1,6 +1,8 @@ -package hcl2 +package pcl import ( + "strings" + "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/pulumi/pulumi/pkg/v3/codegen" @@ -9,7 +11,6 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" - "strings" ) func sameSchemaTypes(xt, yt model.Type) bool { @@ -157,6 +158,7 @@ func resolveDiscriminatedUnions(obj *model.ObjectConsExpression, modelType model if !ok { return nil } + schType = codegen.UnwrapType(schType) union, ok := schType.(*schema.UnionType) if !ok || union.Discriminator == "" { return nil @@ -261,7 +263,7 @@ func extractStringValue(arg model.Expression) (string, bool) { 
return "", false } lit, ok := template.Parts[0].(*model.LiteralValueExpression) - if !ok || lit.Type() != model.StringType { + if !ok || model.StringType.ConversionFrom(lit.Type()) == model.NoConversion { return "", false } return lit.Value.AsString(), true diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_properties.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_properties.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_properties.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_properties.go index fca2efe..d5e7c86 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/rewrite_properties.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/rewrite_properties.go @@ -1,4 +1,4 @@ -package hcl2 +package pcl import ( "bytes" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/type.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/type.go similarity index 98% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/type.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/type.go index fb0969e..33b5d58 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/type.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/type.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package hcl2 +package pcl import ( "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/utilities.go similarity index 99% rename from vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/utilities.go rename to vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/utilities.go index 3d3b3f9..30f1339 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/pcl/utilities.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package hcl2 +package pcl import ( "sort" diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/.gitignore b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/.gitignore new file mode 100644 index 0000000..5ceb386 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/.gitignore @@ -0,0 +1 @@ +venv diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/doc.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/doc.go index a81912e..e5ba7da 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/doc.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/doc.go @@ -73,14 +73,14 @@ func (d DocLanguageHelper) GetDocLinkForFunctionInputOrOutputType(pkg *schema.Pa } // GetLanguageTypeString returns the Python-specific type given a Pulumi schema type. 
-func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, args, optional bool) string { +func (d DocLanguageHelper) GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input bool) string { typeDetails := map[*schema.ObjectType]*typeDetails{} mod := &modContext{ pkg: pkg, mod: moduleName, typeDetails: typeDetails, } - typeName := mod.typeString(t, input, false /*wrapInput*/, args, optional /*optional*/, false /*acceptMapping*/) + typeName := mod.typeString(t, input, false /*acceptMapping*/) // Remove any package qualifiers from the type name. if !input { @@ -103,16 +103,25 @@ func (d DocLanguageHelper) GetResourceFunctionResultName(modName string, f *sche return title(tokenToName(f.Token)) + "Result" } -// GenPropertyCaseMap generates the case maps for a property. -func (d DocLanguageHelper) GenPropertyCaseMap(pkg *schema.Package, modName, tool string, prop *schema.Property, snakeCaseToCamelCase, camelCaseToSnakeCase map[string]string, seenTypes codegen.Set) { - if _, imported := pkg.Language["python"]; !imported { - if err := pkg.ImportLanguages(map[string]schema.Language{"python": Importer}); err != nil { - fmt.Printf("error building case map for %q in module %q", prop.Name, modName) - return +func (d DocLanguageHelper) GetMethodName(m *schema.Method) string { + return PyName(m.Name) +} + +func (d DocLanguageHelper) GetMethodResultName(pkg *schema.Package, modName string, r *schema.Resource, + m *schema.Method) string { + + if info, ok := pkg.Language["python"].(PackageInfo); ok { + if info.LiftSingleValueMethodReturns && m.Function.Outputs != nil && len(m.Function.Outputs.Properties) == 1 { + typeDetails := map[*schema.ObjectType]*typeDetails{} + mod := &modContext{ + pkg: pkg, + mod: modName, + typeDetails: typeDetails, + } + return mod.typeString(m.Function.Outputs.Properties[0].Type, false, false) } } - - recordProperty(prop, snakeCaseToCamelCase, camelCaseToSnakeCase, seenTypes) + 
return fmt.Sprintf("%s.%sResult", resourceName(r), title(d.GetMethodName(m))) } // GetPropertyName returns the property name specific to Python. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen.go index 1a9687a..c02e59d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -32,18 +32,18 @@ import ( "unicode" "github.com/blang/semver" - "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v3/codegen" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" + "github.com/pulumi/pulumi/sdk/v3/go/common/diag" "github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) type typeDetails struct { outputType bool inputType bool - argsType bool resourceOutputType bool plainType bool } @@ -90,25 +90,26 @@ func title(s string) string { } type modContext struct { - pkg *schema.Package - mod string - pyPkgName string - types []*schema.ObjectType - enums []*schema.EnumType - resources []*schema.Resource - functions []*schema.Function - typeDetails map[*schema.ObjectType]*typeDetails - children []*modContext - parent *modContext - snakeCaseToCamelCase map[string]string - camelCaseToSnakeCase map[string]string - tool string - extraSourceFiles []string - isConfig bool + pkg *schema.Package + mod string + pyPkgName string + types []*schema.ObjectType + enums []*schema.EnumType + resources []*schema.Resource + functions []*schema.Function + typeDetails map[*schema.ObjectType]*typeDetails + children []*modContext + parent *modContext + tool string + 
extraSourceFiles []string + isConfig bool // Name overrides set in PackageInfo modNameOverrides map[string]string // Optional overrides for Pulumi module names compatibility string // Toggle compatibility mode for a specified target. + + // Determine whether to lift single-value method return values + liftSingleValueMethodReturns bool } func (mod *modContext) isTopLevel() bool { @@ -141,7 +142,7 @@ func (mod *modContext) details(t *schema.ObjectType) *typeDetails { return details } -func (mod *modContext) modNameAndName(pkg *schema.Package, t schema.Type, input, args bool) (modName string, name string) { +func (mod *modContext) modNameAndName(pkg *schema.Package, t schema.Type, input bool) (modName string, name string) { var info PackageInfo contract.AssertNoError(pkg.ImportLanguages(map[string]schema.Language{"python": Importer})) if v, ok := pkg.Language["python"].(PackageInfo); ok { @@ -158,26 +159,23 @@ func (mod *modContext) modNameAndName(pkg *schema.Package, t schema.Type, input, modNameOverrides: info.ModuleNameOverrides, compatibility: info.Compatibility, } - token, name = t.Token, namingCtx.unqualifiedObjectTypeName(t, input, args) + token, name = t.Token, namingCtx.unqualifiedObjectTypeName(t, input) case *schema.ResourceType: token, name = t.Token, tokenToName(t.Token) } modName = tokenToModule(token, pkg, info.ModuleNameOverrides) - if modName == mod.mod { - modName = "" - } if modName != "" { modName = strings.ReplaceAll(modName, "/", ".") + "." 
} return } -func (mod *modContext) unqualifiedObjectTypeName(t *schema.ObjectType, input, args bool) string { +func (mod *modContext) unqualifiedObjectTypeName(t *schema.ObjectType, input bool) string { name := tokenToName(t.Token) if mod.compatibility != tfbridge20 && mod.compatibility != kubernetes20 { - if args { + if t.IsInputShape() { return name + "Args" } return name @@ -192,7 +190,7 @@ func (mod *modContext) unqualifiedObjectTypeName(t *schema.ObjectType, input, ar return name } -func (mod *modContext) objectType(t *schema.ObjectType, input, args bool) string { +func (mod *modContext) objectType(t *schema.ObjectType, input bool) string { var prefix string if !input { prefix = "outputs." @@ -200,11 +198,11 @@ func (mod *modContext) objectType(t *schema.ObjectType, input, args bool) string // If it's an external type, reference it via fully qualified name. if t.Package != mod.pkg { - modName, name := mod.modNameAndName(t.Package, t, input, args) + modName, name := mod.modNameAndName(t.Package, t, input) return fmt.Sprintf("'%s.%s%s%s'", pyPack(t.Package.Name), modName, prefix, name) } - modName, name := mod.tokenToModule(t.Token), mod.unqualifiedObjectTypeName(t, input, args) + modName, name := mod.tokenToModule(t.Token), mod.unqualifiedObjectTypeName(t, input) if modName == "" && modName != mod.mod { rootModName := "_root_outputs." if input { @@ -252,7 +250,7 @@ func (mod *modContext) resourceType(r *schema.ResourceType) string { } pkg := r.Resource.Package - modName, name := mod.modNameAndName(pkg, r, false, false) + modName, name := mod.modNameAndName(pkg, r, false) return fmt.Sprintf("%s.%s%s", pyPack(pkg.Name), modName, name) } @@ -291,10 +289,18 @@ func tokenToName(tok string) string { } func tokenToModule(tok string, pkg *schema.Package, moduleNameOverrides map[string]string) string { + // See if there's a manually-overridden module name. 
canonicalModName := pkg.TokenToModule(tok) - modName := PyName(strings.ToLower(canonicalModName)) if override, ok := moduleNameOverrides[canonicalModName]; ok { - modName = override + return override + } + // A module can include fileparts, which we want to preserve. + var modName string + for i, part := range strings.Split(strings.ToLower(canonicalModName), "/") { + if i > 0 { + modName += "/" + } + modName += PyName(part) } return modName } @@ -328,13 +334,17 @@ func printComment(w io.Writer, comment string, indent string) { fmt.Fprintf(w, "%s\"\"\"\n", indent) } -func (mod *modContext) genHeader(w io.Writer, needsSDK bool, imports imports) { +func genStandardHeader(w io.Writer, tool string) { // Set the encoding to UTF-8, in case the comments contain non-ASCII characters. fmt.Fprintf(w, "# coding=utf-8\n") // Emit a standard warning header ("do not edit", etc). - fmt.Fprintf(w, "# *** WARNING: this file was generated by %v. ***\n", mod.tool) + fmt.Fprintf(w, "# *** WARNING: this file was generated by %v. ***\n", tool) fmt.Fprintf(w, "# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\n") +} + +func (mod *modContext) genHeader(w io.Writer, needsSDK bool, imports imports) { + genStandardHeader(w, mod.tool) // If needed, emit the standard Pulumi SDK import statement. 
if needsSDK { @@ -380,6 +390,20 @@ func (fs fs) add(path string, contents []byte) { fs[path] = contents } +func (mod *modContext) genUtilitiesFile() []byte { + buffer := &bytes.Buffer{} + genStandardHeader(buffer, mod.tool) + fmt.Fprintf(buffer, utilitiesFile) + if url := mod.pkg.PluginDownloadURL; url != "" { + _, err := fmt.Fprintf(buffer, ` +def get_plugin_download_url(): + return %q +`, url) + contract.AssertNoError(err) + } + return buffer.Bytes() +} + func (mod *modContext) gen(fs fs) error { dir := path.Join(mod.pyPkgName, mod.mod) @@ -396,21 +420,27 @@ func (mod *modContext) gen(fs fs) error { addFile := func(name, contents string) { p := path.Join(dir, name) - exports = append(exports, name[:len(name)-len(".py")]) + if !strings.HasSuffix(name, ".pyi") { + exports = append(exports, name[:len(name)-len(".py")]) + } fs.add(p, []byte(contents)) } // Utilities, config, readme switch mod.mod { case "": - buffer := &bytes.Buffer{} - mod.genHeader(buffer, false /*needsSDK*/, nil) - fmt.Fprintf(buffer, "%s", utilitiesFile) - fs.add(filepath.Join(dir, "_utilities.py"), buffer.Bytes()) + fs.add(filepath.Join(dir, "_utilities.py"), mod.genUtilitiesFile()) fs.add(filepath.Join(dir, "py.typed"), []byte{}) // Ensure that the top-level (provider) module directory contains a README.md file. 
- readme := mod.pkg.Language["python"].(PackageInfo).Readme + + var readme string + if pythonInfo, ok := mod.pkg.Language["python"]; ok { + if typedInfo, ok := pythonInfo.(PackageInfo); ok { + readme = typedInfo.Readme + } + } + if readme == "" { readme = mod.pkg.Description if readme != "" && readme[len(readme)-1] != '\n' { @@ -435,11 +465,21 @@ func (mod *modContext) gen(fs fs) error { return err } addFile("vars.py", vars) + typeStubs, err := mod.genConfigStubs(mod.pkg.Config) + if err != nil { + return err + } + addFile("__init__.pyi", typeStubs) } } // Resources for _, r := range mod.resources { + if r.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + res, err := mod.genResource(r) if err != nil { return err @@ -458,6 +498,11 @@ func (mod *modContext) gen(fs fs) error { // Functions for _, f := range mod.functions { + if f.IsOverlay { + // This function code is generated by the provider, so no further action is required. + continue + } + fun, err := mod.genFunction(f) if err != nil { return err @@ -491,6 +536,9 @@ func (mod *modContext) gen(fs fs) error { } func (mod *modContext) hasTypes(input bool) bool { + if allTypesAreOverlays(mod.types) { + return false + } for _, t := range mod.types { if input && mod.details(t).inputType { return true @@ -524,9 +572,9 @@ func (mod *modContext) unqualifiedImportName() string { // Extract version suffix from child modules. Nested versions will have their own __init__.py file. 
// Example: apps/v1beta1 -> v1beta1 - parts := strings.SplitN(name, "/", 2) - if len(parts) == 2 { - name = parts[1] + parts := strings.Split(name, "/") + if len(parts) > 1 { + name = parts[len(parts)-1] } return PyName(name) @@ -547,6 +595,13 @@ func (mod *modContext) fullyQualifiedImportName() string { func (mod *modContext) genInit(exports []string) string { w := &bytes.Buffer{} mod.genHeader(w, false /*needsSDK*/, nil) + if mod.isConfig { + fmt.Fprintf(w, "import sys\n") + fmt.Fprintf(w, "from .vars import _ExportableConfig\n") + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "sys.modules[__name__].__class__ = _ExportableConfig\n") + return w.String() + } fmt.Fprintf(w, "%s\n", mod.genUtilitiesImport()) fmt.Fprintf(w, "import typing\n") @@ -589,9 +644,18 @@ func (mod *modContext) genInit(exports []string) string { for _, submod := range children { if !submod.isEmpty() { - fmt.Fprintf(w, " import %s as %s\n", + unq := submod.unqualifiedImportName() + + // The `__iam = iam` hack enables + // PyCharm and VSCode completion to do + // better. + // + // See https://github.com/pulumi/pulumi/issues/7367 + fmt.Fprintf(w, " import %s as __%s\n %s = __%s\n", submod.fullyQualifiedImportName(), - submod.unqualifiedImportName()) + unq, + unq, + unq) } } @@ -720,7 +784,7 @@ func (mod *modContext) importResourceType(r *schema.ResourceType) string { return fmt.Sprintf("from %s%s import %s", importPath, name, components[0]) } -// emitConfigVariables emits all config variables in the given module, returning the resulting file. +// genConfig emits all config variables in the given module, returning the resulting file. func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { w := &bytes.Buffer{} @@ -728,32 +792,98 @@ func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { mod.collectImports(variables, imports, false /*input*/) mod.genHeader(w, true /*needsSDK*/, imports) - - // Export only the symbols we want exported. 
- if len(variables) > 0 { - fmt.Fprintf(w, "__all__ = [\n") - for _, p := range variables { - fmt.Fprintf(w, " '%s',\n", PyName(p.Name)) - } - fmt.Fprintf(w, "]\n\n") - } + fmt.Fprintf(w, "import types\n") + fmt.Fprintf(w, "\n") // Create a config bag for the variables to pull from. fmt.Fprintf(w, "__config__ = pulumi.Config('%s')\n", mod.pkg.Name) - fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "\n\n") + + // To avoid a breaking change to the existing config getters, we define a class that extends + // the `ModuleType` type and implements property getters for each config key. We then overwrite + // the `__class__` attribute of the current module as described in the proposal for PEP-549. This allows + // us to maintain the existing interface for users but implement dynamic getters behind the scenes. + fmt.Fprintf(w, "class _ExportableConfig(types.ModuleType):\n") + indent := " " // Emit an entry for all config variables. for _, p := range variables { - configFetch := fmt.Sprintf("__config__.get('%s')", p.Name) - if p.DefaultValue != nil { - v, err := getDefaultValue(p.DefaultValue, p.Type) - if err != nil { - return "", err - } - configFetch += " or " + v + configFetch, err := genConfigFetch(p) + if err != nil { + return "", err } - fmt.Fprintf(w, "%s = %s\n", PyName(p.Name), configFetch) + typeString := genConfigVarType(p) + fmt.Fprintf(w, "%s@property\n", indent) + fmt.Fprintf(w, "%sdef %s(self) -> %s:\n", indent, PyName(p.Name), typeString) + dblIndent := strings.Repeat(indent, 2) + + printComment(w, p.Comment, dblIndent) + fmt.Fprintf(w, "%sreturn %s\n", dblIndent, configFetch) + fmt.Fprintf(w, "\n") + } + + return w.String(), nil +} + +func genConfigFetch(configVar *schema.Property) (string, error) { + getFunc := "get" + unwrappedType := codegen.UnwrapType(configVar.Type) + switch unwrappedType { + case schema.BoolType: + getFunc = "get_bool" + case schema.IntType: + getFunc = "get_int" + case schema.NumberType: + getFunc = "get_float" + } + + configFetch := 
fmt.Sprintf("__config__.%s('%s')", getFunc, configVar.Name) + if configVar.DefaultValue != nil { + v, err := getDefaultValue(configVar.DefaultValue, unwrappedType) + if err != nil { + return "", err + } + configFetch += " or " + v + } + return configFetch, nil +} + +func genConfigVarType(configVar *schema.Property) string { + // For historical reasons and to maintain backwards compatibility, the config variables for python + // are typed as `Optional[str`] or `str` for complex objects since the getters only use config.get(). + // To return the rich objects would be a breaking change, tracked in https://github.com/pulumi/pulumi/issues/7493 + typeString := "str" + switch codegen.UnwrapType(configVar.Type) { + case schema.BoolType: + typeString = "bool" + case schema.IntType: + typeString = "int" + case schema.NumberType: + typeString = "float" + } + + if configVar.DefaultValue == nil || configVar.DefaultValue.Value == nil { + typeString = "Optional[" + typeString + "]" + } + return typeString +} + +// genConfigStubs emits all type information for the config variables in the given module, returning the resulting file. +// We do this because we lose IDE autocomplete by implementing the dynamic config getters described in genConfig. +// Emitting these stubs allows us to maintain type hints and autocomplete for users. +func (mod *modContext) genConfigStubs(variables []*schema.Property) (string, error) { + w := &bytes.Buffer{} + + imports := imports{} + mod.collectImports(variables, imports, false /*input*/) + + mod.genHeader(w, true /*needsSDK*/, imports) + + // Emit an entry for all config variables. 
+ for _, p := range variables { + typeString := genConfigVarType(p) + fmt.Fprintf(w, "%s: %s\n", p.Name, typeString) printComment(w, p.Comment, "") fmt.Fprintf(w, "\n") } @@ -761,14 +891,33 @@ func (mod *modContext) genConfig(variables []*schema.Property) (string, error) { return w.String(), nil } +func allTypesAreOverlays(types []*schema.ObjectType) bool { + for _, t := range types { + if !t.IsOverlay { + return false + } + } + return true +} + func (mod *modContext) genTypes(dir string, fs fs) error { genTypes := func(file string, input bool) error { w := &bytes.Buffer{} + if allTypesAreOverlays(mod.types) { + // If all resources in this module are overlays, skip further code generation. + return nil + } + imports := imports{} for _, t := range mod.types { + if t.IsOverlay { + // This type is generated by the provider, so no further action is required. + continue + } + if input && mod.details(t).inputType { - visitObjectTypes(t.Properties, func(t schema.Type, _ bool) { + visitObjectTypes(t.Properties, func(t schema.Type) { switch t := t.(type) { case *schema.ObjectType: imports.addTypeIf(mod, t, true /*input*/, func(imp string) bool { @@ -795,21 +944,24 @@ func (mod *modContext) genTypes(dir string, fs fs) error { // Export only the symbols we want exported. fmt.Fprintf(w, "__all__ = [\n") for _, t := range mod.types { - if input && mod.details(t).inputType { - if mod.details(t).argsType { - fmt.Fprintf(w, " '%s',\n", mod.unqualifiedObjectTypeName(t, true, true)) - } - if mod.details(t).plainType { - fmt.Fprintf(w, " '%s',\n", mod.unqualifiedObjectTypeName(t, true, false)) - } - } else if !input && mod.details(t).outputType { - fmt.Fprintf(w, " '%s',\n", mod.unqualifiedObjectTypeName(t, false, false)) + if t.IsOverlay { + // This type is generated by the provider, so no further action is required. 
+ continue + } + + if input && mod.details(t).inputType || !input && mod.details(t).outputType { + fmt.Fprintf(w, " '%s',\n", mod.unqualifiedObjectTypeName(t, input)) } } fmt.Fprintf(w, "]\n\n") var hasTypes bool for _, t := range mod.types { + if t.IsOverlay { + // This type is generated by the provider, so no further action is required. + continue + } + if input && mod.details(t).inputType { if err := mod.genObjectType(w, t, true); err != nil { return err @@ -831,10 +983,7 @@ func (mod *modContext) genTypes(dir string, fs fs) error { if err := genTypes("_inputs.py", true); err != nil { return err } - if err := genTypes("outputs.py", false); err != nil { - return err - } - return nil + return genTypes("outputs.py", false) } func awaitableTypeNames(tok string) (baseName, awaitableName string) { @@ -877,9 +1026,8 @@ func (mod *modContext) genAwaitableType(w io.Writer, obj *schema.ObjectType) str fmt.Fprintf(w, "\n") // Write out Python property getters for each property. - mod.genProperties(w, obj.Properties, false /*setters*/, func(prop *schema.Property) string { - return mod.typeString(prop.Type, false /*input*/, false /*wrapInput*/, false /*args*/, !prop.IsRequired, - false /*acceptMapping*/) + mod.genProperties(w, obj.Properties, false /*setters*/, "", func(prop *schema.Property) string { + return mod.typeString(prop.Type, false /*input*/, false /*acceptMapping*/) }) // Produce an awaitable subclass. 
@@ -910,29 +1058,67 @@ func (mod *modContext) genAwaitableType(w io.Writer, obj *schema.ObjectType) str return awaitableName } +func resourceName(res *schema.Resource) string { + name := pyClassName(tokenToName(res.Token)) + if res.IsProvider { + name = "Provider" + } + return name +} + func (mod *modContext) genResource(res *schema.Resource) (string, error) { w := &bytes.Buffer{} imports := imports{} - mod.collectImports(res.Properties, imports, false /*input*/) - mod.collectImports(res.InputProperties, imports, true /*input*/) + mod.collectImportsForResource(res.Properties, imports, false /*input*/, res) + mod.collectImportsForResource(res.InputProperties, imports, true /*input*/, res) if res.StateInputs != nil { - mod.collectImports(res.StateInputs.Properties, imports, true /*input*/) + mod.collectImportsForResource(res.StateInputs.Properties, imports, true /*input*/, res) + } + for _, method := range res.Methods { + if method.Function.Inputs != nil { + mod.collectImportsForResource(method.Function.Inputs.Properties, imports, true /*input*/, res) + } + if method.Function.Outputs != nil { + mod.collectImportsForResource(method.Function.Outputs.Properties, imports, false /*input*/, res) + } } mod.genHeader(w, true /*needsSDK*/, imports) - name := pyClassName(tokenToName(res.Token)) - if res.IsProvider { - name = "Provider" + name := resourceName(res) + + resourceArgsName := fmt.Sprintf("%sArgs", name) + // Some providers (e.g. Kubernetes) have types with the same name as resources (e.g. StorageClass in Kubernetes). + // We've already shipped the input type (e.g. StorageClassArgs) in the same module as the resource, so we can't use + // the same name for the resource's args class. When an input type exists that would conflict with the name of the + // resource args class, we'll use a different name: `InitArgs` instead of `Args`. 
+ const alternateSuffix = "InitArgs" + for _, t := range mod.types { + if mod.details(t).inputType { + if mod.unqualifiedObjectTypeName(t, true) == resourceArgsName { + resourceArgsName = name + alternateSuffix + break + } + } + } + // If we're using the alternate name, ensure the alternate name doesn't conflict with an input type. + if strings.HasSuffix(resourceArgsName, alternateSuffix) { + for _, t := range mod.types { + if mod.details(t).inputType { + if mod.unqualifiedObjectTypeName(t, true) == resourceArgsName { + return "", fmt.Errorf("resource args class named %s in %s conflicts with input type", resourceArgsName, mod.mod) + } + } + } } // Export only the symbols we want exported. - fmt.Fprintf(w, "__all__ = ['%[1]sArgs', '%[1]s']\n\n", name) + fmt.Fprintf(w, "__all__ = ['%s', '%s']\n\n", resourceArgsName, name) // Produce an args class. argsComment := fmt.Sprintf("The set of arguments for constructing a %s resource.", name) - err := mod.genType(w, fmt.Sprintf("%sArgs", name), argsComment, res.InputProperties, false, true, true, false) + err := mod.genType(w, resourceArgsName, argsComment, res.InputProperties, true, false) if err != nil { return "", err } @@ -945,7 +1131,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { len(res.StateInputs.Properties) > 0 if hasStateInputs { stateComment := fmt.Sprintf("Input properties used for looking up and filtering %s resources.", name) - err = mod.genType(w, fmt.Sprintf("_%sState", name), stateComment, res.StateInputs.Properties, false, true, true, false) + err = mod.genType(w, fmt.Sprintf("_%sState", name), stateComment, res.StateInputs.Properties, true, false) if err != nil { return "", err } @@ -976,7 +1162,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { // Determine if all inputs are optional. 
allOptionalInputs := true for _, prop := range res.InputProperties { - allOptionalInputs = allOptionalInputs && !prop.IsRequired + allOptionalInputs = allOptionalInputs && !prop.IsRequired() } // Emit __init__ overloads and implementation... @@ -989,8 +1175,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { // If there's an argument type, emit it. for _, prop := range res.InputProperties { - wrapInput := !prop.IsPlain - ty := mod.typeString(prop.Type, true, wrapInput, wrapInput, true /*optional*/, true /*acceptMapping*/) + ty := mod.typeString(codegen.OptionalType(prop), true, true /*acceptMapping*/) fmt.Fprintf(w, ",\n %s: %s = None", InitParamName(prop.Name), ty) } @@ -1000,7 +1185,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { // Emit an __init__ overload that accepts the resource's inputs as function arguments. fmt.Fprintf(w, " @overload\n") emitInitMethodSignature("__init__") - mod.genInitDocstring(w, res, name, false /*argsOverload*/) + mod.genInitDocstring(w, res, resourceArgsName, false /*argsOverload*/) fmt.Fprintf(w, " ...\n") // Emit an __init__ overload that accepts the resource's inputs from the args class. @@ -1008,18 +1193,18 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { fmt.Fprintf(w, " def __init__(__self__,\n") fmt.Fprintf(w, " resource_name: str,\n") if allOptionalInputs { - fmt.Fprintf(w, " args: Optional[%sArgs] = None,\n", name) + fmt.Fprintf(w, " args: Optional[%s] = None,\n", resourceArgsName) } else { - fmt.Fprintf(w, " args: %sArgs,\n", name) + fmt.Fprintf(w, " args: %s,\n", resourceArgsName) } fmt.Fprintf(w, " opts: Optional[pulumi.ResourceOptions] = None):\n") - mod.genInitDocstring(w, res, name, true /*argsOverload*/) + mod.genInitDocstring(w, res, resourceArgsName, true /*argsOverload*/) fmt.Fprintf(w, " ...\n") // Emit the actual implementation of __init__, which does the appropriate thing based on which // overload was called. 
fmt.Fprintf(w, " def __init__(__self__, resource_name: str, *args, **kwargs):\n") - fmt.Fprintf(w, " resource_args, opts = _utilities.get_resource_args_opts(%sArgs, pulumi.ResourceOptions, *args, **kwargs)\n", name) + fmt.Fprintf(w, " resource_args, opts = _utilities.get_resource_args_opts(%s, pulumi.ResourceOptions, *args, **kwargs)\n", resourceArgsName) fmt.Fprintf(w, " if resource_args is not None:\n") fmt.Fprintf(w, " __self__._internal_init(resource_name, opts, **resource_args.__dict__)\n") fmt.Fprintf(w, " else:\n") @@ -1037,6 +1222,11 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { fmt.Fprintf(w, " raise TypeError('Expected resource options to be a ResourceOptions instance')\n") fmt.Fprintf(w, " if opts.version is None:\n") fmt.Fprintf(w, " opts.version = _utilities.get_version()\n") + if mod.pkg.PluginDownloadURL != "" { + fmt.Fprintf(w, " if opts.plugin_download_url is None:\n") + fmt.Fprintf(w, " opts.plugin_download_url = _utilities.get_plugin_download_url()\n") + } + if res.IsComponent { fmt.Fprintf(w, " if opts.id is not None:\n") fmt.Fprintf(w, " raise ValueError('ComponentResource classes do not support opts.id')\n") @@ -1051,7 +1241,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { // We use an instance of the `Args` class for `__props__` to opt-in to the type/name metadata based // translation behavior. The instance is created using `__new__` to avoid any validation in the `__init__` method, // values are set directly on its `__dict__`, including any additional output properties. - fmt.Fprintf(w, " __props__ = %[1]sArgs.__new__(%[1]sArgs)\n\n", name) + fmt.Fprintf(w, " __props__ = %[1]s.__new__(%[1]s)\n\n", resourceArgsName) fmt.Fprintf(w, "") ins := codegen.NewStringSet() @@ -1062,7 +1252,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { // Fill in computed defaults for arguments. 
if prop.DefaultValue != nil { - dv, err := getDefaultValue(prop.DefaultValue, prop.Type) + dv, err := getDefaultValue(prop.DefaultValue, codegen.UnwrapType(prop.Type)) if err != nil { return "", err } @@ -1071,7 +1261,7 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { } // Check that required arguments are present. - if prop.IsRequired { + if prop.IsRequired() { fmt.Fprintf(w, " if %s is None and not opts.urn:\n", pname) fmt.Fprintf(w, " raise TypeError(\"Missing required property '%s'\")\n", pname) } @@ -1142,6 +1332,16 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { fmt.Fprintf(w, "\n opts = pulumi.ResourceOptions.merge(opts, secret_opts)\n") } + replaceOnChangesProps, errList := res.ReplaceOnChanges() + for _, err := range errList { + cmdutil.Diag().Warningf(&diag.Diag{Message: err.Error()}) + } + if len(replaceOnChangesProps) > 0 { + replaceOnChangesStrings := schema.PropertyListJoinToString(replaceOnChangesProps, PyName) + fmt.Fprintf(w, ` replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["%s"])`, strings.Join(replaceOnChangesStrings, `", "`)) + fmt.Fprintf(w, "\n opts = pulumi.ResourceOptions.merge(opts, replace_on_changes)\n") + } + // Finally, chain to the base constructor, which will actually register the resource. 
tok := res.Token if res.IsProvider { @@ -1167,8 +1367,8 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { if hasStateInputs { for _, prop := range res.StateInputs.Properties { - pname := PyName(prop.Name) - ty := mod.typeString(prop.Type, true, true, true, true /*optional*/, true /*acceptMapping*/) + pname := InitParamName(prop.Name) + ty := mod.typeString(codegen.OptionalType(prop), true, true /*acceptMapping*/) fmt.Fprintf(w, ",\n %s: %s = None", pname, ty) } } @@ -1182,14 +1382,14 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { } else { // If we don't have any state inputs, we'll just instantiate the `Args` class, // to opt-in to the improved translation behavior. - fmt.Fprintf(w, " __props__ = %[1]sArgs.__new__(%[1]sArgs)\n\n", name) + fmt.Fprintf(w, " __props__ = %[1]s.__new__(%[1]s)\n\n", resourceArgsName) } stateInputs := codegen.NewStringSet() if res.StateInputs != nil { for _, prop := range res.StateInputs.Properties { stateInputs.Add(prop.Name) - fmt.Fprintf(w, " __props__.__dict__[%[1]q] = %[1]s\n", PyName(prop.Name)) + fmt.Fprintf(w, " __props__.__dict__[%q] = %s\n", PyName(prop.Name), InitParamName(prop.Name)) } } for _, prop := range res.Properties { @@ -1202,15 +1402,18 @@ func (mod *modContext) genResource(res *schema.Resource) (string, error) { } // Write out Python property getters for each of the resource's properties. - mod.genProperties(w, res.Properties, false /*setters*/, func(prop *schema.Property) string { - ty := mod.typeString(prop.Type, false /*input*/, false /*wrapInput*/, false /*args*/, !prop.IsRequired, false /*acceptMapping*/) + mod.genProperties(w, res.Properties, false /*setters*/, "", func(prop *schema.Property) string { + ty := mod.typeString(prop.Type, false /*input*/, false /*acceptMapping*/) return fmt.Sprintf("pulumi.Output[%s]", ty) }) + // Write out methods. 
+ mod.genMethods(w, res) + return w.String(), nil } -func (mod *modContext) genProperties(w io.Writer, properties []*schema.Property, setters bool, +func (mod *modContext) genProperties(w io.Writer, properties []*schema.Property, setters bool, indent string, propType func(prop *schema.Property) string) { // Write out Python properties for each property. If there is a property named "property", it will // be emitted last to avoid conflicting with the built-in `@property` decorator function. We do @@ -1218,22 +1421,22 @@ func (mod *modContext) genProperties(w io.Writer, properties []*schema.Property, // because that wouldn't address the problem if there was a property named "builtins". emitProp := func(pname string, prop *schema.Property) { ty := propType(prop) - fmt.Fprintf(w, " @property\n") + fmt.Fprintf(w, "%s @property\n", indent) if pname == prop.Name { - fmt.Fprintf(w, " @pulumi.getter\n") + fmt.Fprintf(w, "%s @pulumi.getter\n", indent) } else { - fmt.Fprintf(w, " @pulumi.getter(name=%q)\n", prop.Name) + fmt.Fprintf(w, "%s @pulumi.getter(name=%q)\n", indent, prop.Name) } - fmt.Fprintf(w, " def %s(self) -> %s:\n", pname, ty) + fmt.Fprintf(w, "%s def %s(self) -> %s:\n", indent, pname, ty) if prop.Comment != "" { - printComment(w, prop.Comment, " ") + printComment(w, prop.Comment, indent+" ") } - fmt.Fprintf(w, " return pulumi.get(self, %q)\n\n", pname) + fmt.Fprintf(w, "%s return pulumi.get(self, %q)\n\n", indent, pname) if setters { - fmt.Fprintf(w, " @%s.setter\n", pname) - fmt.Fprintf(w, " def %s(self, value: %s):\n", pname, ty) - fmt.Fprintf(w, " pulumi.set(self, %q, value)\n\n", pname) + fmt.Fprintf(w, "%s @%s.setter\n", indent, pname) + fmt.Fprintf(w, "%s def %s(self, value: %s):\n", indent, pname, ty) + fmt.Fprintf(w, "%s pulumi.set(self, %q, value)\n\n", indent, pname) } } var propNamedProperty *schema.Property @@ -1251,6 +1454,170 @@ func (mod *modContext) genProperties(w io.Writer, properties []*schema.Property, } } +func (mod *modContext) genMethods(w 
io.Writer, res *schema.Resource) { + genReturnType := func(method *schema.Method) string { + obj := method.Function.Outputs + name := pyClassName(title(method.Name)) + "Result" + + // Produce a class definition with optional """ comment. + fmt.Fprintf(w, " @pulumi.output_type\n") + fmt.Fprintf(w, " class %s:\n", name) + printComment(w, obj.Comment, " ") + + // Now generate an initializer with properties for all inputs. + fmt.Fprintf(w, " def __init__(__self__") + for _, prop := range obj.Properties { + fmt.Fprintf(w, ", %s=None", PyName(prop.Name)) + } + fmt.Fprintf(w, "):\n") + for _, prop := range obj.Properties { + // Check that required arguments are present. Also check that types are as expected. + pname := PyName(prop.Name) + ptype := mod.pyType(prop.Type) + fmt.Fprintf(w, " if %s and not isinstance(%s, %s):\n", pname, pname, ptype) + fmt.Fprintf(w, " raise TypeError(\"Expected argument '%s' to be a %s\")\n", pname, ptype) + + if prop.DeprecationMessage != "" { + escaped := strings.ReplaceAll(prop.DeprecationMessage, `"`, `\"`) + fmt.Fprintf(w, " if %s is not None:\n", pname) + fmt.Fprintf(w, " warnings.warn(\"\"\"%s\"\"\", DeprecationWarning)\n", escaped) + fmt.Fprintf(w, " pulumi.log.warn(\"\"\"%s is deprecated: %s\"\"\")\n\n", pname, escaped) + } + + // Now perform the assignment. + fmt.Fprintf(w, " pulumi.set(__self__, \"%[1]s\", %[1]s)\n", pname) + } + fmt.Fprintf(w, "\n") + + // Write out Python property getters for each property. + mod.genProperties(w, obj.Properties, false /*setters*/, " ", func(prop *schema.Property) string { + return mod.typeString(prop.Type, false /*input*/, false /*acceptMapping*/) + }) + + return name + } + + genMethod := func(method *schema.Method) { + methodName := PyName(method.Name) + fun := method.Function + + shouldLiftReturn := mod.liftSingleValueMethodReturns && method.Function.Outputs != nil && len(method.Function.Outputs.Properties) == 1 + + // If there is a return type, emit it. 
+ var retTypeName, retTypeNameQualified, retTypeNameQualifiedOutput, methodRetType string + if fun.Outputs != nil { + retTypeName = genReturnType(method) + retTypeNameQualified = fmt.Sprintf("%s.%s", resourceName(res), retTypeName) + retTypeNameQualifiedOutput = fmt.Sprintf("pulumi.Output['%s']", retTypeNameQualified) + + if shouldLiftReturn { + methodRetType = fmt.Sprintf("pulumi.Output['%s']", mod.pyType(fun.Outputs.Properties[0].Type)) + } else { + methodRetType = retTypeNameQualifiedOutput + } + } + + var args []*schema.Property + if fun.Inputs != nil { + // Filter out the __self__ argument from the inputs. + args = make([]*schema.Property, 0, len(fun.Inputs.InputShape.Properties)-1) + for _, arg := range fun.Inputs.InputShape.Properties { + if arg.Name == "__self__" { + continue + } + args = append(args, arg) + } + // Sort required args first. + sort.Slice(args, func(i, j int) bool { + pi, pj := args[i], args[j] + switch { + case pi.IsRequired() != pj.IsRequired(): + return pi.IsRequired() && !pj.IsRequired() + default: + return pi.Name < pj.Name + } + }) + } + + // Write out the function signature. + def := fmt.Sprintf(" def %s(", methodName) + var indent string + if len(args) > 0 { + indent = strings.Repeat(" ", len(def)) + } + fmt.Fprintf(w, "%s__self__", def) + // Bare `*` argument to force callers to use named arguments. + if len(args) > 0 { + fmt.Fprintf(w, ", *") + } + for _, arg := range args { + pname := PyName(arg.Name) + ty := mod.typeString(arg.Type, true, false /*acceptMapping*/) + var defaultValue string + if !arg.IsRequired() { + defaultValue = " = None" + } + fmt.Fprintf(w, ",\n%s%s: %s%s", indent, pname, ty, defaultValue) + } + if retTypeNameQualifiedOutput != "" { + fmt.Fprintf(w, ") -> %s:\n", methodRetType) + } else { + fmt.Fprintf(w, ") -> None:\n") + } + + // If this func has documentation, write it at the top of the docstring, otherwise use a generic comment. 
+ docs := &bytes.Buffer{} + if fun.Comment != "" { + fmt.Fprintln(docs, codegen.FilterExamples(fun.Comment, "python")) + } + if len(args) > 0 { + fmt.Fprintln(docs, "") + for _, arg := range args { + mod.genPropDocstring(docs, PyName(arg.Name), arg, false /*acceptMapping*/) + } + } + printComment(w, docs.String(), " ") + + if fun.DeprecationMessage != "" { + fmt.Fprintf(w, " pulumi.log.warn(\"\"\"%s is deprecated: %s\"\"\")\n", methodName, + fun.DeprecationMessage) + } + + // Copy the function arguments into a dictionary. + fmt.Fprintf(w, " __args__ = dict()\n") + fmt.Fprintf(w, " __args__['__self__'] = __self__\n") + for _, arg := range args { + pname := PyName(arg.Name) + fmt.Fprintf(w, " __args__['%s'] = %s\n", arg.Name, pname) + } + + // Now simply call the function with the arguments. + var typ string + if retTypeNameQualified != "" { + // Pass along the private output_type we generated, so any nested output classes are instantiated by + // the call. + typ = fmt.Sprintf(", typ=%s", retTypeNameQualified) + } + + if method.Function.Outputs == nil { + fmt.Fprintf(w, " pulumi.runtime.call('%s', __args__, res=__self__%s)\n", fun.Token, typ) + } else if shouldLiftReturn { + // Store the return in a variable and return the property output + fmt.Fprintf(w, " __result__ = pulumi.runtime.call('%s', __args__, res=__self__%s)\n", fun.Token, typ) + fmt.Fprintf(w, " return __result__.%s\n", PyName(fun.Outputs.Properties[0].Name)) + } else { + // Otherwise return the call directly + fmt.Fprintf(w, " return pulumi.runtime.call('%s', __args__, res=__self__%s)\n", fun.Token, typ) + } + + fmt.Fprintf(w, "\n") + } + + for _, method := range res.Methods { + genMethod(method) + } +} + func (mod *modContext) writeAlias(w io.Writer, alias *schema.Alias) { fmt.Fprint(w, "pulumi.Alias(") parts := []string{} @@ -1286,7 +1653,10 @@ func (mod *modContext) genFunction(fun *schema.Function) (string, error) { mod.genHeader(w, true /*needsSDK*/, imports) - baseName, awaitableName := 
awaitableTypeNames(fun.Outputs.Token) + var baseName, awaitableName string + if fun.Outputs != nil { + baseName, awaitableName = awaitableTypeNames(fun.Outputs.Token) + } name := PyName(tokenToName(fun.Token)) // Export only the symbols we want exported. @@ -1296,6 +1666,9 @@ func (mod *modContext) genFunction(fun *schema.Function) (string, error) { fmt.Fprintf(w, " '%s',\n", awaitableName) } fmt.Fprintf(w, " '%s',\n", name) + if fun.NeedsOutputVersion() { + fmt.Fprintf(w, " '%s_output',\n", name) + } fmt.Fprintf(w, "]\n\n") if fun.DeprecationMessage != "" { @@ -1316,47 +1689,9 @@ func (mod *modContext) genFunction(fun *schema.Function) (string, error) { args = fun.Inputs.Properties } - // Write out the function signature. - def := fmt.Sprintf("def %s(", name) - var indent string - if len(args) > 0 { - indent = strings.Repeat(" ", len(def)) - } - fmt.Fprintf(w, def) - for i, arg := range args { - var ind string - if i != 0 { - ind = indent - } - pname := PyName(arg.Name) - ty := mod.typeString(arg.Type, true, false /*wrapInput*/, false /*args*/, true /*optional*/, true /*acceptMapping*/) - fmt.Fprintf(w, "%s%s: %s = None,\n", ind, pname, ty) - } - fmt.Fprintf(w, "%sopts: Optional[pulumi.InvokeOptions] = None", indent) - if retTypeName != "" { - fmt.Fprintf(w, ") -> %s:\n", retTypeName) - } else { - fmt.Fprintf(w, "):\n") - } - - // If this func has documentation, write it at the top of the docstring, otherwise use a generic comment. 
- docs := &bytes.Buffer{} - if fun.Comment != "" { - fmt.Fprintln(docs, codegen.FilterExamples(fun.Comment, "python")) - } else { - fmt.Fprintln(docs, "Use this data source to access information about an existing resource.") - } - if len(args) > 0 { - fmt.Fprintln(docs, "") - for _, arg := range args { - mod.genPropDocstring(docs, PyName(arg.Name), arg, false /*wrapInputs*/, true /*acceptMapping*/) - } - } - printComment(w, docs.String(), " ") - - if fun.DeprecationMessage != "" { - fmt.Fprintf(w, " pulumi.log.warn(\"\"\"%s is deprecated: %s\"\"\")\n", name, fun.DeprecationMessage) - } + mod.genFunDef(w, name, retTypeName, args, false /* wrapInput */) + mod.genFunDocstring(w, fun) + mod.genFunDeprecationMessage(w, fun) // Copy the function arguments into a dictionary. fmt.Fprintf(w, " __args__ = dict()\n") @@ -1370,6 +1705,10 @@ func (mod *modContext) genFunction(fun *schema.Function) (string, error) { fmt.Fprintf(w, " opts = pulumi.InvokeOptions()\n") fmt.Fprintf(w, " if opts.version is None:\n") fmt.Fprintf(w, " opts.version = _utilities.get_version()\n") + if mod.pkg.PluginDownloadURL != "" { + fmt.Fprintf(w, " if opts.plugin_download_url is None:\n") + fmt.Fprintf(w, " opts.plugin_download_url = _utilities.get_plugin_download_url()\n") + } // Now simply invoke the runtime function with the arguments. var typ string @@ -1396,9 +1735,107 @@ func (mod *modContext) genFunction(fun *schema.Function) (string, error) { } } + mod.genFunctionOutputVersion(w, fun) return w.String(), nil } +func (mod *modContext) genFunDocstring(w io.Writer, fun *schema.Function) { + var args []*schema.Property + if fun.Inputs != nil { + args = fun.Inputs.Properties + } + + // If this func has documentation, write it at the top of the docstring, otherwise use a generic comment. 
+ docs := &bytes.Buffer{} + if fun.Comment != "" { + fmt.Fprintln(docs, codegen.FilterExamples(fun.Comment, "python")) + } else { + fmt.Fprintln(docs, "Use this data source to access information about an existing resource.") + } + if len(args) > 0 { + fmt.Fprintln(docs, "") + for _, arg := range args { + mod.genPropDocstring(docs, PyName(arg.Name), arg, true /*acceptMapping*/) + } + } + printComment(w, docs.String(), " ") +} + +func (mod *modContext) genFunDeprecationMessage(w io.Writer, fun *schema.Function) { + if fun.DeprecationMessage == "" { + return + } + name := PyName(tokenToName(fun.Token)) + fmt.Fprintf(w, " pulumi.log.warn(\"\"\"%s is deprecated: %s\"\"\")\n", name, fun.DeprecationMessage) +} + +// Generates the function signature line `def fn(...):` without the body. +func (mod *modContext) genFunDef(w io.Writer, name, retTypeName string, args []*schema.Property, wrapInput bool) { + def := fmt.Sprintf("def %s(", name) + var indent string + if len(args) > 0 { + indent = strings.Repeat(" ", len(def)) + } + fmt.Fprintf(w, def) + for i, arg := range args { + var ind string + if i != 0 { + ind = indent + } + pname := PyName(arg.Name) + + var argType schema.Type + if wrapInput { + argType = &schema.OptionalType{ + ElementType: &schema.InputType{ + ElementType: arg.Type, + }, + } + } else { + argType = codegen.OptionalType(arg) + } + + ty := mod.typeString(argType, true /*input*/, true /*acceptMapping*/) + fmt.Fprintf(w, "%s%s: %s = None,\n", ind, pname, ty) + } + fmt.Fprintf(w, "%sopts: Optional[pulumi.InvokeOptions] = None", indent) + if retTypeName != "" { + fmt.Fprintf(w, ") -> %s:\n", retTypeName) + } else { + fmt.Fprintf(w, "):\n") + } +} + +// Generates `def ${fn}_output(..) version lifted to work on +// `Input`-wrapped arguments and producing an `Output`-wrapped result. 
+func (mod *modContext) genFunctionOutputVersion(w io.Writer, fun *schema.Function) { + if !fun.NeedsOutputVersion() { + return + } + + var retTypeName string + if fun.Outputs != nil { + originalOutputTypeName, _ := awaitableTypeNames(fun.Outputs.Token) + retTypeName = fmt.Sprintf("pulumi.Output[%s]", originalOutputTypeName) + } else { + retTypeName = "pulumi.Output[void]" + } + + originalName := PyName(tokenToName(fun.Token)) + outputSuffixedName := fmt.Sprintf("%s_output", originalName) + + var args []*schema.Property + if fun.Inputs != nil { + args = fun.Inputs.Properties + } + + fmt.Fprintf(w, "\n\n@_utilities.lift_output_func(%s)\n", originalName) + mod.genFunDef(w, outputSuffixedName, retTypeName, args, true /*wrapInput*/) + mod.genFunDocstring(w, fun) + mod.genFunDeprecationMessage(w, fun) + fmt.Fprintf(w, " ...\n") +} + func (mod *modContext) genEnums(w io.Writer, enums []*schema.EnumType) error { // Header mod.genHeader(w, false /*needsSDK*/, nil) @@ -1428,7 +1865,7 @@ func (mod *modContext) genEnums(w io.Writer, enums []*schema.EnumType) error { func (mod *modContext) genEnum(w io.Writer, enum *schema.EnumType) error { indent := " " enumName := tokenToName(enum.Token) - underlyingType := mod.typeString(enum.ElementType, false, false, false, false, false) + underlyingType := mod.typeString(enum.ElementType, false, false) switch enum.ElementType { case schema.StringType, schema.IntType, schema.NumberType: @@ -1452,32 +1889,43 @@ func (mod *modContext) genEnum(w io.Writer, enum *schema.EnumType) error { } else { fmt.Fprintf(w, "%v\n", e.Value) } + if e.Comment != "" { + printComment(w, e.Comment, indent) + } } default: - return errors.Errorf("enums of type %s are not yet implemented for this language", enum.ElementType.String()) + return fmt.Errorf("enums of type %s are not yet implemented for this language", enum.ElementType.String()) } return nil } -func visitObjectTypes(properties []*schema.Property, visitor func(objectOrResource schema.Type, plain bool)) 
{ - codegen.VisitTypeClosure(properties, func(t codegen.Type) { - switch st := t.Type.(type) { +func visitObjectTypes(properties []*schema.Property, visitor func(objectOrResource schema.Type)) { + codegen.VisitTypeClosure(properties, func(t schema.Type) { + switch st := t.(type) { case *schema.EnumType, *schema.ObjectType, *schema.ResourceType: - visitor(st, t.Plain) + visitor(st) } }) } func (mod *modContext) collectImports(properties []*schema.Property, imports imports, input bool) { - codegen.VisitTypeClosure(properties, func(t codegen.Type) { - switch t := t.Type.(type) { + mod.collectImportsForResource(properties, imports, input, nil) +} + +func (mod *modContext) collectImportsForResource(properties []*schema.Property, imports imports, input bool, + res *schema.Resource) { + codegen.VisitTypeClosure(properties, func(t schema.Type) { + switch t := t.(type) { case *schema.ObjectType: imports.addType(mod, t, input) case *schema.EnumType: imports.addEnum(mod, t.Token) case *schema.ResourceType: - imports.addResource(mod, t) + // Don't import itself. + if t.Resource != res { + imports.addResource(mod, t) + } } }) } @@ -1506,15 +1954,19 @@ func genPulumiPluginFile(pkg *schema.Package) ([]byte, error) { plugin := &plugin.PulumiPluginJSON{ Resource: true, Name: pkg.Name, - Version: "${PLUGIN_VERSION}", Server: pkg.PluginDownloadURL, } + + if info, ok := pkg.Language["python"].(PackageInfo); pkg.Version != nil && ok && info.RespectSchemaVersion { + plugin.Version = pkg.Version.String() + } + return plugin.JSON() } // genPackageMetadata generates all the non-code metadata required by a Pulumi package. 
func genPackageMetadata( - tool string, pkg *schema.Package, pyPkgName string, emitPulumiPluginFile bool, requires map[string]string) (string, error) { + tool string, pkg *schema.Package, pyPkgName string, requires map[string]string, pythonRequires string) (string, error) { w := &bytes.Buffer{} (&modContext{tool: tool}).genHeader(w, false /*needsSDK*/, nil) @@ -1526,24 +1978,35 @@ func genPackageMetadata( fmt.Fprintf(w, "from subprocess import check_call\n") fmt.Fprintf(w, "\n\n") + // Create a constant for the version number to replace during build + version := "0.0.0" + pluginVersion := version + info, ok := pkg.Language["python"].(PackageInfo) + if pkg.Version != nil && ok && info.RespectSchemaVersion { + version = pypiVersion(*pkg.Version) + pluginVersion = pkg.Version.String() + } + fmt.Fprintf(w, "VERSION = \"%s\"\n", version) + fmt.Fprintf(w, "PLUGIN_VERSION = \"%s\"\n\n", pluginVersion) + // Create a command that will install the Pulumi plugin for this resource provider. fmt.Fprintf(w, "class InstallPluginCommand(install):\n") fmt.Fprintf(w, " def run(self):\n") fmt.Fprintf(w, " install.run(self)\n") fmt.Fprintf(w, " try:\n") if pkg.PluginDownloadURL == "" { - fmt.Fprintf(w, " check_call(['pulumi', 'plugin', 'install', 'resource', '%s', '${PLUGIN_VERSION}'])\n", pkg.Name) + fmt.Fprintf(w, " check_call(['pulumi', 'plugin', 'install', 'resource', '%s', PLUGIN_VERSION])\n", pkg.Name) } else { - fmt.Fprintf(w, " check_call(['pulumi', 'plugin', 'install', 'resource', '%s', '${PLUGIN_VERSION}', '--server', '%s'])\n", pkg.Name, pkg.PluginDownloadURL) + fmt.Fprintf(w, " check_call(['pulumi', 'plugin', 'install', 'resource', '%s', PLUGIN_VERSION, '--server', '%s'])\n", pkg.Name, pkg.PluginDownloadURL) } fmt.Fprintf(w, " except OSError as error:\n") fmt.Fprintf(w, " if error.errno == errno.ENOENT:\n") - fmt.Fprintf(w, " print(\"\"\"\n") + fmt.Fprintf(w, " print(f\"\"\"\n") fmt.Fprintf(w, " There was an error installing the %s resource provider plugin.\n", pkg.Name) 
fmt.Fprintf(w, " It looks like `pulumi` is not installed on your system.\n") fmt.Fprintf(w, " Please visit https://pulumi.com/ to install the Pulumi CLI.\n") fmt.Fprintf(w, " You may try manually installing the plugin by running\n") - fmt.Fprintf(w, " `pulumi plugin install resource %s ${PLUGIN_VERSION}`\n", pkg.Name) + fmt.Fprintf(w, " `pulumi plugin install resource %s {PLUGIN_VERSION}`\n", pkg.Name) fmt.Fprintf(w, " \"\"\")\n") fmt.Fprintf(w, " else:\n") fmt.Fprintf(w, " raise\n") @@ -1556,12 +2019,15 @@ func genPackageMetadata( fmt.Fprintf(w, " with open('README.md', encoding='utf-8') as f:\n") fmt.Fprintf(w, " return f.read()\n") fmt.Fprintf(w, " except FileNotFoundError:\n") - fmt.Fprintf(w, " return \"%s Pulumi Package - Development Version\"\n", pkg.Name) + fmt.Fprintf(w, " return \"%s Pulumi Package - Development Version\"\n", pkg.Name) fmt.Fprintf(w, "\n\n") // Finally, the actual setup part. fmt.Fprintf(w, "setup(name='%s',\n", pyPkgName) - fmt.Fprintf(w, " version='${VERSION}',\n") + if pythonRequires != "" { + fmt.Fprintf(w, " python_requires='%s',\n", pythonRequires) + } + fmt.Fprintf(w, " version=VERSION,\n") if pkg.Description != "" { fmt.Fprintf(w, " description=%q,\n", sanitizePackageDescription(pkg.Description)) } @@ -1597,9 +2063,7 @@ func genPackageMetadata( fmt.Fprintf(w, " package_data={\n") fmt.Fprintf(w, " '%s': [\n", pyPkgName) fmt.Fprintf(w, " 'py.typed',\n") - if emitPulumiPluginFile { - fmt.Fprintf(w, " 'pulumiplugin.json',\n") - } + fmt.Fprintf(w, " 'pulumi-plugin.json',\n") fmt.Fprintf(w, " ]\n") fmt.Fprintf(w, " },\n") @@ -1610,15 +2074,15 @@ func genPackageMetadata( // We expect a specific pattern of ">=version,=version1,=version1,._utilities. is the module we want to query the version for. 
root_package, *rest = __name__.split('.') @@ -2576,8 +2950,13 @@ def get_semver_version(): return SemverVersion(major=major, minor=minor, patch=patch, prerelease=prerelease) +# Determine the version once and cache the value, which measurably improves program performance. +_version = _get_semver_version() +_version_str = str(_version) + + def get_version(): - return str(get_semver_version()) + return _version_str def get_resource_args_opts(resource_args_type, resource_options_type, *args, **kwargs): @@ -2648,14 +3027,12 @@ def _lazy_import_temp(fullname): class Package(pulumi.runtime.ResourcePackage): - _version = get_semver_version() - def __init__(self, pkg_info): super().__init__() self.pkg_info = pkg_info def version(self): - return Package._version + return _version def construct_provider(self, name: str, typ: str, urn: str) -> pulumi.ProviderResource: if typ != self.pkg_info['token']: @@ -2665,14 +3042,12 @@ class Package(pulumi.runtime.ResourcePackage): class Module(pulumi.runtime.ResourceModule): - _version = get_semver_version() - def __init__(self, mod_info): super().__init__() self.mod_info = mod_info def version(self): - return Module._version + return _version def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource: class_name = self.mod_info['classes'].get(typ, None) @@ -2696,4 +3071,27 @@ def register(resource_modules, resource_packages): mod_info['pkg'], mod_info['mod'], Module(mod_info)) + + +_F = typing.TypeVar('_F', bound=typing.Callable[..., typing.Any]) + + +def lift_output_func(func: typing.Any) -> typing.Callable[[_F], _F]: + """Decorator internally used on {fn}_output lifted function versions + to implement them automatically from the un-lifted function.""" + + func_sig = inspect.signature(func) + + def lifted_func(*args, opts=None, **kwargs): + bound_args = func_sig.bind(*args, **kwargs) + # Convert tuple to list, see pulumi/pulumi#8172 + args_list = list(bound_args.args) + return pulumi.Output.from_input({ + 'args': args_list, 
+ 'kwargs': bound_args.kwargs + }).apply(lambda resolved_args: func(*resolved_args['args'], + opts=opts, + **resolved_args['kwargs'])) + + return (lambda _: lifted_func) ` diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program.go index f10eb4d..288131a 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program.go @@ -22,10 +22,10 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/pkg/v3/codegen/schema" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -34,30 +34,27 @@ type generator struct { // The formatter to use when generating code. *format.Formatter - program *hcl2.Program + program *pcl.Program diagnostics hcl.Diagnostics configCreated bool - casingTables map[string]map[string]string quotes map[model.Expression]string } -type objectTypeInfo struct { - isDictionary bool - camelCaseToSnakeCase map[string]string -} - -func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, error) { +func GenerateProgram(program *pcl.Program) (map[string][]byte, hcl.Diagnostics, error) { g, err := newGenerator(program) if err != nil { return nil, nil, err } // Linearize the nodes into an order appropriate for procedural code generation. 
- nodes := hcl2.Linearize(program) + nodes := pcl.Linearize(program) + + // Creating a list to store and later print helper methods if they turn out to be needed + preambleHelperMethods := codegen.NewStringSet() var main bytes.Buffer - g.genPreamble(&main, program) + g.genPreamble(&main, program, preambleHelperMethods) for _, n := range nodes { g.genNode(&main, n) } @@ -68,25 +65,17 @@ func GenerateProgram(program *hcl2.Program) (map[string][]byte, hcl.Diagnostics, return files, g.diagnostics, nil } -func newGenerator(program *hcl2.Program) (*generator, error) { +func newGenerator(program *pcl.Program) (*generator, error) { // Import Python-specific schema info. - casingTables := map[string]map[string]string{} for _, p := range program.Packages() { if err := p.ImportLanguages(map[string]schema.Language{"python": Importer}); err != nil { return nil, err } - - // Build the case mapping table. - camelCaseToSnakeCase := map[string]string{} - seenTypes := codegen.Set{} - buildCaseMappingTables(p, nil, camelCaseToSnakeCase, seenTypes) - casingTables[PyName(p.Name)] = camelCaseToSnakeCase } g := &generator{ - program: program, - casingTables: casingTables, - quotes: map[model.Expression]string{}, + program: program, + quotes: map[model.Expression]string{}, } g.Formatter = format.NewFormatter(g) @@ -126,21 +115,45 @@ func (g *generator) genComment(w io.Writer, comment syntax.Comment) { } } -func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { +func (g *generator) genPreamble(w io.Writer, program *pcl.Program, preambleHelperMethods codegen.StringSet) { // Print the pulumi import at the top. g.Fprintln(w, "import pulumi") // Accumulate other imports for the various providers. Don't emit them yet, as we need to sort them later on. 
- importSet := codegen.NewStringSet("pulumi") + type Import struct { + // Use an "import ${KEY} as ${.Pkg}" + ImportAs bool + // Only relevant for when ImportAs=true + Pkg string + } + importSet := map[string]Import{} for _, n := range program.Nodes { - if r, isResource := n.(*hcl2.Resource); isResource { + if r, isResource := n.(*pcl.Resource); isResource { pkg, _, _, _ := r.DecomposeToken() - importSet.Add("pulumi_" + makeValidIdentifier(pkg)) + packageName := "pulumi_" + makeValidIdentifier(pkg) + if r.Schema != nil && r.Schema.Package != nil { + if info, ok := r.Schema.Package.Language["python"].(PackageInfo); ok && info.PackageName != "" { + packageName = info.PackageName + } + } + importSet[packageName] = Import{ImportAs: true, Pkg: makeValidIdentifier(pkg)} } diags := n.VisitExpressions(nil, func(n model.Expression) (model.Expression, hcl.Diagnostics) { if call, ok := n.(*model.FunctionCallExpression); ok { - if i := g.getFunctionImports(call); i != "" { - importSet.Add(i) + if i := g.getFunctionImports(call); len(i) > 0 && i[0] != "" { + for _, importPackage := range i { + importAs := strings.HasPrefix(importPackage, "pulumi_") + var maybePkg string + if importAs { + maybePkg = importPackage[len("pulumi_"):] + } + importSet[importPackage] = Import{ + ImportAs: importAs, + Pkg: maybePkg} + } + } + if helperMethodBody, ok := getHelperMethodIfNeeded(call.Name); ok { + preambleHelperMethods.Add(helperMethodBody) } } return n, nil @@ -149,12 +162,17 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { } var imports []string - for _, pkg := range importSet.SortedValues() { + importSetNames := codegen.NewStringSet() + for k := range importSet { + importSetNames.Add(k) + } + for _, pkg := range importSetNames.SortedValues() { if pkg == "pulumi" { continue } - if strings.HasPrefix(pkg, "pulumi_") { - imports = append(imports, fmt.Sprintf("import %s as %s", pkg, pkg[len("pulumi_"):])) + control := importSet[pkg] + if control.ImportAs { + imports = 
append(imports, fmt.Sprintf("import %s as %s", pkg, control.Pkg)) } else { imports = append(imports, fmt.Sprintf("import %s", pkg)) } @@ -166,23 +184,28 @@ func (g *generator) genPreamble(w io.Writer, program *hcl2.Program) { g.Fprintln(w, i) } g.Fprint(w, "\n") + + // If we collected any helper methods that should be added, write them just before the main func + for _, preambleHelperMethodBody := range preambleHelperMethods.SortedValues() { + g.Fprintf(w, "%s\n\n", preambleHelperMethodBody) + } } -func (g *generator) genNode(w io.Writer, n hcl2.Node) { +func (g *generator) genNode(w io.Writer, n pcl.Node) { switch n := n.(type) { - case *hcl2.Resource: + case *pcl.Resource: g.genResource(w, n) - case *hcl2.ConfigVariable: + case *pcl.ConfigVariable: g.genConfigVariable(w, n) - case *hcl2.LocalVariable: + case *pcl.LocalVariable: g.genLocalVariable(w, n) - case *hcl2.OutputVariable: + case *pcl.OutputVariable: g.genOutputVariable(w, n) } } // resourceTypeName computes the Python package, module, and type name for the given resource. -func resourceTypeName(r *hcl2.Resource) (string, string, string, hcl.Diagnostics) { +func resourceTypeName(r *pcl.Resource) (string, string, string, hcl.Diagnostics) { // Compute the resource type from the Pulumi type token. pkg, module, member, diagnostics := r.DecomposeToken() @@ -206,30 +229,23 @@ func resourceTypeName(r *hcl2.Resource) (string, string, string, hcl.Diagnostics // argumentTypeName computes the Python argument class name for the given expression and model type. 
func (g *generator) argumentTypeName(expr model.Expression, destType model.Type) string { - schemaType, ok := hcl2.GetSchemaForType(destType.(model.Type)) + schemaType, ok := pcl.GetSchemaForType(destType.(model.Type)) if !ok { return "" } + schemaType = codegen.UnwrapType(schemaType) + objType, ok := schemaType.(*schema.ObjectType) if !ok { return "" } - if objType.Language != nil { - pyTypeInfo, ok := objType.Language["python"].(objectTypeInfo) - if ok { - if pyTypeInfo.isDictionary { - return "" - } - } - } - token := objType.Token tokenRange := expr.SyntaxNode().Range() // Example: aws, s3/BucketLogging, BucketLogging, []Diagnostics - pkgName, module, member, diagnostics := hcl2.DecomposeToken(token, tokenRange) + pkgName, module, member, diagnostics := pcl.DecomposeToken(token, tokenRange) contract.Assert(len(diagnostics) == 0) modName := objType.Package.TokenToModule(token) @@ -261,7 +277,7 @@ func (g *generator) makeResourceName(baseName, count string) string { return fmt.Sprintf(`f"%s-{%s}"`, baseName, count) } -func (g *generator) lowerResourceOptions(opts *hcl2.ResourceOptions) (*model.Block, []*quoteTemp) { +func (g *generator) lowerResourceOptions(opts *pcl.ResourceOptions) (*model.Block, []*quoteTemp) { if opts == nil { return nil, nil } @@ -328,7 +344,7 @@ func (g *generator) genResourceOptions(w io.Writer, block *model.Block, hasInput } // genResource handles the generation of instantiations of non-builtin resources. -func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { +func (g *generator) genResource(w io.Writer, r *pcl.Resource) { pkg, module, memberName, diagnostics := resourceTypeName(r) g.diagnostics = append(g.diagnostics, diagnostics...) 
if module != "" { @@ -346,10 +362,7 @@ func (g *generator) genResource(w io.Writer, r *hcl2.Resource) { } g.genTrivia(w, r.Definition.Tokens.GetOpenBrace()) - casingTable := g.casingTables[pkg] for _, input := range r.Inputs { - g.lowerObjectKeys(input.Value, casingTable) - destType, diagnostics := r.InputType.Traverse(hcl.TraverseAttr{Name: input.Name}) g.diagnostics = append(g.diagnostics, diagnostics...) value, valueTemps := g.lowerExpression(input.Value, destType.(model.Type)) @@ -422,7 +435,7 @@ func (g *generator) genTemps(w io.Writer, temps []*quoteTemp) { } } -func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { +func (g *generator) genConfigVariable(w io.Writer, v *pcl.ConfigVariable) { // TODO(pdg): trivia if !g.configCreated { @@ -464,7 +477,7 @@ func (g *generator) genConfigVariable(w io.Writer, v *hcl2.ConfigVariable) { } } -func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { +func (g *generator) genLocalVariable(w io.Writer, v *pcl.LocalVariable) { value, temps := g.lowerExpression(v.Definition.Value, v.Type()) g.genTemps(w, temps) @@ -472,7 +485,7 @@ func (g *generator) genLocalVariable(w io.Writer, v *hcl2.LocalVariable) { g.Fgenf(w, "%s%s = %.v\n", g.Indent, PyName(v.Name()), value) } -func (g *generator) genOutputVariable(w io.Writer, v *hcl2.OutputVariable) { +func (g *generator) genOutputVariable(w io.Writer, v *pcl.OutputVariable) { value, temps := g.lowerExpression(v.Value, v.Type()) g.genTemps(w, temps) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_expressions.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_expressions.go index 0a02e6e..ecbedb0 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_expressions.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_expressions.go @@ -11,8 +11,8 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" - 
"github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" ) @@ -26,10 +26,10 @@ func (nameInfo) Format(name string) string { func (g *generator) lowerExpression(expr model.Expression, typ model.Type) (model.Expression, []*quoteTemp) { // TODO(pdg): diagnostics - expr = hcl2.RewritePropertyReferences(expr) - expr, _ = hcl2.RewriteApplies(expr, nameInfo(0), false) + expr = pcl.RewritePropertyReferences(expr) + expr, _ = pcl.RewriteApplies(expr, nameInfo(0), false) expr, _ = g.lowerProxyApplies(expr) - expr = hcl2.RewriteConversions(expr, typ) + expr = pcl.RewriteConversions(expr, typ) expr, quotes, _ := g.rewriteQuotes(expr) return expr, quotes @@ -153,7 +153,7 @@ func (g *generator) GenForExpression(w io.Writer, expr *model.ForExpression) { func (g *generator) genApply(w io.Writer, expr *model.FunctionCallExpression) { // Extract the list of outputs and the continuation expression from the `__apply` arguments. - applyArgs, then := hcl2.ParseApplyCall(expr) + applyArgs, then := pcl.ParseApplyCall(expr) if len(applyArgs) == 1 { // If we only have a single output, just generate a normal `.apply`. @@ -177,37 +177,41 @@ func functionName(tokenArg model.Expression) (string, string, string, hcl.Diagno tokenRange := tokenArg.SyntaxNode().Range() // Compute the resource type from the Pulumi type token. 
- pkg, module, member, diagnostics := hcl2.DecomposeToken(token, tokenRange) + pkg, module, member, diagnostics := pcl.DecomposeToken(token, tokenRange) return makeValidIdentifier(pkg), strings.Replace(module, "/", ".", -1), title(member), diagnostics } -var functionImports = map[string]string{ - "fileArchive": "pulumi", - "fileAsset": "pulumi", - "readDir": "os", - "toJSON": "json", +var functionImports = map[string][]string{ + "fileArchive": {"pulumi"}, + "fileAsset": {"pulumi"}, + "filebase64": {"base64"}, + "filebase64sha256": {"base64", "hashlib"}, + "readDir": {"os"}, + "toBase64": {"base64"}, + "toJSON": {"json"}, + "sha1": {"hashlib"}, } -func (g *generator) getFunctionImports(x *model.FunctionCallExpression) string { - if x.Name != hcl2.Invoke { +func (g *generator) getFunctionImports(x *model.FunctionCallExpression) []string { + if x.Name != pcl.Invoke { return functionImports[x.Name] } pkg, _, _, diags := functionName(x.Args[0]) contract.Assert(len(diags) == 0) - return "pulumi_" + pkg + return []string{"pulumi_" + pkg} } func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionCallExpression) { switch expr.Name { - case hcl2.IntrinsicConvert: + case pcl.IntrinsicConvert: switch arg := expr.Args[0].(type) { case *model.ObjectConsExpression: g.genObjectConsExpression(w, arg, expr.Type()) default: g.Fgenf(w, "%.v", expr.Args[0]) } - case hcl2.IntrinsicApply: + case pcl.IntrinsicApply: g.genApply(w, expr) case "element": g.Fgenf(w, "%.16v[%.v]", expr.Args[0], expr.Args[1]) @@ -217,7 +221,12 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(w, "pulumi.FileArchive(%.v)", expr.Args[0]) case "fileAsset": g.Fgenf(w, "pulumi.FileAsset(%.v)", expr.Args[0]) - case hcl2.Invoke: + case "filebase64": + g.Fgenf(w, "(lambda path: base64.b64encode(open(path).read().encode()).decode())(%.v)", expr.Args[0]) + case "filebase64sha256": + // Assuming the existence of the following helper method + g.Fgenf(w, 
"computeFilebase64sha256(%v)", expr.Args[0]) + case pcl.Invoke: pkg, module, fn, diags := functionName(expr.Args[0]) contract.Assert(len(diags) == 0) if module != "" { @@ -225,6 +234,16 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC } name := fmt.Sprintf("%s%s.%s", pkg, module, PyName(fn)) + isOut := pcl.IsOutputVersionInvokeCall(expr) + if isOut { + name = fmt.Sprintf("%s_output", name) + } + + if len(expr.Args) == 1 { + g.Fprintf(w, "%s()", name) + return + } + optionsBag := "" if len(expr.Args) == 3 { var buf bytes.Buffer @@ -234,11 +253,8 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(w, "%s(", name) - casingTable := g.casingTables[pkg] if obj, ok := expr.Args[1].(*model.FunctionCallExpression); ok { if obj, ok := obj.Args[0].(*model.ObjectConsExpression); ok { - g.lowerObjectKeys(expr.Args[1], casingTable) - indenter := func(f func()) { f() } if len(obj.Items) > 1 { indenter = g.Indented @@ -263,6 +279,8 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC } g.Fgenf(w, "%v)", optionsBag) + case "join": + g.Fgenf(w, "%.16v.join(%v)", expr.Args[0], expr.Args[1]) case "length": g.Fgenf(w, "len(%.v)", expr.Args[0]) case "lookup": @@ -289,8 +307,12 @@ func (g *generator) GenFunctionCallExpression(w io.Writer, expr *model.FunctionC g.Fgenf(w, "pulumi.secret(%v)", expr.Args[0]) case "split": g.Fgenf(w, "%.16v.split(%.v)", expr.Args[1], expr.Args[0]) + case "toBase64": + g.Fgenf(w, "base64.b64encode(%.16v.encode()).decode()", expr.Args[0]) case "toJSON": g.Fgenf(w, "json.dumps(%.v)", expr.Args[0]) + case "sha1": + g.Fgenf(w, "hashlib.sha1(%v.encode()).hexdigest()", expr.Args[0]) default: var rng hcl.Range if expr.Syntax != nil { @@ -342,7 +364,12 @@ func (g *generator) genStringLiteral(w io.Writer, quotes, v string) { } func (g *generator) GenLiteralValueExpression(w io.Writer, expr *model.LiteralValueExpression) { - switch expr.Type() { + typ := 
expr.Type() + if cns, ok := typ.(*model.ConstType); ok { + typ = cns.Type + } + + switch typ { case model.BoolType: if expr.Value.True() { g.Fgen(w, "True") @@ -461,7 +488,7 @@ func (g *generator) GenTemplateExpression(w io.Writer, expr *model.TemplateExpre prefix, escapeBraces := "", false for _, part := range expr.Parts { - if lit, ok := part.(*model.LiteralValueExpression); !ok || lit.Type() != model.StringType { + if lit, ok := part.(*model.LiteralValueExpression); !ok || !model.StringType.AssignableFrom(lit.Type()) { prefix, escapeBraces = "f", true break } @@ -472,7 +499,7 @@ func (g *generator) GenTemplateExpression(w io.Writer, expr *model.TemplateExpre g.Fprintf(b, "%s%s", prefix, quotes) for _, expr := range expr.Parts { - if lit, ok := expr.(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := expr.(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { g.genEscapedString(b, lit.Value.AsString(), escapeNewlines, escapeBraces) } else { g.Fgenf(b, "{%.v}", expr) diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_lower.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_lower.go index 6fb24fa..663fe5d 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_lower.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_lower.go @@ -3,10 +3,9 @@ package python import ( "github.com/hashicorp/hcl/v2" "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" - "github.com/zclconf/go-cty/cty" ) func isParameterReference(parameters codegen.Set, x model.Expression) bool { @@ -81,12 +80,12 @@ func (g *generator) lowerProxyApplies(expr model.Expression) (model.Expression, rewriter := func(expr model.Expression) (model.Expression, 
hcl.Diagnostics) { // Ignore the node if it is not a call to the apply intrinsic. apply, ok := expr.(*model.FunctionCallExpression) - if !ok || apply.Name != hcl2.IntrinsicApply { + if !ok || apply.Name != pcl.IntrinsicApply { return expr, nil } // Parse the apply call. - args, then := hcl2.ParseApplyCall(apply) + args, then := pcl.ParseApplyCall(apply) parameters := codegen.Set{} for _, p := range then.Parameters { @@ -102,23 +101,3 @@ func (g *generator) lowerProxyApplies(expr model.Expression) (model.Expression, } return model.VisitExpression(expr, model.IdentityVisitor, rewriter) } - -func (g *generator) lowerObjectKeys(expr model.Expression, camelCaseToSnakeCase map[string]string) { - switch expr := expr.(type) { - case *model.ObjectConsExpression: - for _, item := range expr.Items { - // Ignore non-literal keys - if key, ok := item.Key.(*model.LiteralValueExpression); ok && key.Value.Type().Equals(cty.String) { - if keyVal, ok := camelCaseToSnakeCase[key.Value.AsString()]; ok { - key.Value = cty.StringVal(keyVal) - } - } - - g.lowerObjectKeys(item.Value, camelCaseToSnakeCase) - } - case *model.TupleConsExpression: - for _, element := range expr.Expressions { - g.lowerObjectKeys(element, camelCaseToSnakeCase) - } - } -} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_quotes.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_quotes.go index bb1f130..e6c7440 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_quotes.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_quotes.go @@ -7,10 +7,9 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/pulumi/pulumi/pkg/v3/codegen" - "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model" "github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax" - "github.com/pulumi/pulumi/pkg/v3/codegen/schema" + "github.com/pulumi/pulumi/pkg/v3/codegen/pcl" 
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/zclconf/go-cty/cty" ) @@ -53,19 +52,8 @@ func (g *generator) rewriteTraversal(traversal hcl.Traversal, source model.Expre keyVal, objectKey := key.AsString(), false receiver := parts[i] - if schemaType, ok := hcl2.GetSchemaForType(model.GetTraversableType(receiver)); ok { - obj := schemaType.(*schema.ObjectType) - - info, ok := obj.Language["python"].(objectTypeInfo) - if ok { - objectKey = !info.isDictionary - if mapped, ok := info.camelCaseToSnakeCase[keyVal]; ok { - keyVal = mapped - } - } else { - objectKey, keyVal = true, PyName(keyVal) - } - + if _, ok := pcl.GetSchemaForType(model.GetTraversableType(receiver)); ok { + objectKey, keyVal = true, PyName(keyVal) switch t := traverser.(type) { case hcl.TraverseAttr: t.Name = keyVal @@ -196,7 +184,7 @@ func (qa *quoteAllocator) allocateExpression(x model.Expression) (model.Expressi var longString bool switch x := x.(type) { case *model.LiteralValueExpression: - if x.Type() != model.StringType || qa.inTemplate() { + if !model.StringType.AssignableFrom(x.Type()) || qa.inTemplate() { return x, nil } v := x.Value.AsString() @@ -210,7 +198,7 @@ func (qa *quoteAllocator) allocateExpression(x model.Expression) (model.Expressi } case *model.TemplateExpression: for i, part := range x.Parts { - if lit, ok := part.(*model.LiteralValueExpression); ok && lit.Type() == model.StringType { + if lit, ok := part.(*model.LiteralValueExpression); ok && model.StringType.AssignableFrom(lit.Type()) { v := lit.Value.AsString() switch strings.Count(v, "\n") { case 0: @@ -257,7 +245,7 @@ func (qa *quoteAllocator) freeExpression(x model.Expression) (model.Expression, switch x := x.(type) { case *model.LiteralValueExpression: - if x.Type() != model.StringType || qa.inTemplate() { + if !model.StringType.AssignableFrom(x.Type()) || qa.inTemplate() { return x, nil } // OK diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_utils.go 
b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_utils.go new file mode 100644 index 0000000..c3a8f1e --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_program_utils.go @@ -0,0 +1,17 @@ +package python + +// Provides code for a method which will be placed in the program preamble if deemed +// necessary. Because many tasks in Go such as reading a file require extensive error +// handling, it is much prettier to encapsulate that error handling boilerplate as its +// own function in the preamble. +func getHelperMethodIfNeeded(functionName string) (string, bool) { + switch functionName { + case "filebase64sha256": + return `def computeFilebase64sha256(path): + fileData = open(path).read().encode() + hashedData = hashlib.sha256(fileData).digest() + return base64.b64encode(hashedData).decode()`, true + default: + return "", false + } +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_resource_mappings.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_resource_mappings.go index 3b598db..f15d5b1 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_resource_mappings.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/gen_resource_mappings.go @@ -89,7 +89,7 @@ func makeResourceModuleInfo(pkg, mod, fqn string) resourceModuleInfo { } func allResourceModuleInfos(root *modContext) []resourceModuleInfo { - var result []resourceModuleInfo + result := []resourceModuleInfo{} for _, mctx := range root.walkSelfWithDescendants() { result = append(result, collectResourceModuleInfos(mctx)...) } @@ -103,6 +103,11 @@ func collectResourceModuleInfos(mctx *modContext) []resourceModuleInfo { byMod := make(map[string]resourceModuleInfo) for _, res := range mctx.resources { + if res.IsOverlay { + // This resource code is generated by the provider, so no further action is required.
+ continue + } + if !res.IsProvider { pkg := mctx.pkg.Name mod := mctx.pkg.TokenToRuntimeModule(res.Token) @@ -145,7 +150,7 @@ type resourcePackageInfo struct { } func allResourcePackageInfos(root *modContext) []resourcePackageInfo { - var result []resourcePackageInfo + result := []resourcePackageInfo{} for _, mctx := range root.walkSelfWithDescendants() { result = append(result, collectResourcePackageInfos(mctx)...) } @@ -158,6 +163,11 @@ func allResourcePackageInfos(root *modContext) []resourcePackageInfo { func collectResourcePackageInfos(mctx *modContext) []resourcePackageInfo { var out []resourcePackageInfo for _, res := range mctx.resources { + if res.IsOverlay { + // This resource code is generated by the provider, so no further action is required. + continue + } + if res.IsProvider { pkg := mctx.pkg.Name token := res.Token diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/importer.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/importer.go index a837b61..85b14cc 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/importer.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/importer.go @@ -34,8 +34,10 @@ type PropertyInfo struct { // PackageInfo tracks Python-specific information associated with a package. type PackageInfo struct { // PackageName is an override for the name of the generated python package. - PackageName string `json:"packageName,omitempty"` - Requires map[string]string `json:"requires,omitempty"` + PackageName string `json:"packageName,omitempty"` + // PythonRequires determines the Python versions that the generated provider supports + PythonRequires string `json:"pythonRequires,omitempty"` + Requires map[string]string `json:"requires,omitempty"` // Readme contains the text for the package's README.md files. 
Readme string `json:"readme,omitempty"` // Optional overrides for Pulumi module names @@ -47,8 +49,11 @@ type PackageInfo struct { Compatibility string `json:"compatibility,omitempty"` // Deprecated: This bool is no longer needed since all providers now use input/output classes. UsesIOClasses bool `json:"usesIOClasses,omitempty"` - // Indicates whether the pulumiplugin.json file should be generated. - EmitPulumiPluginFile bool `json:"emitPulumiPluginFile,omitempty"` + // Determines whether to make single-return-value methods return an output object or the single value. + LiftSingleValueMethodReturns bool `json:"liftSingleValueMethodReturns,omitempty"` + + // Respect the Pkg.Version field for emitted code. + RespectSchemaVersion bool `json:"respectSchemaVersion,omitempty"` } // Importer implements schema.Language for Python. diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/python.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/python.go index 2423ca8..a506e32 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/python.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/python.go @@ -105,6 +105,12 @@ func pyName(name string, legacy bool) string { var result strings.Builder // The components of the name, joined together with underscores. var currentComponent strings.Builder // The characters composing the current component being built + + // Preallocate enough space for the name + 5 underscores. '5' is based on a wild guess that most names will consist + // of 5 or fewer words. + result.Grow(len(name) + 5) + currentComponent.Grow(len(name) + 5) + state := stateFirst for _, char := range name { // If this is an illegal character for a Python identifier, replace it. 
diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/requirements.txt b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/requirements.txt new file mode 100644 index 0000000..e079f8a --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/requirements.txt @@ -0,0 +1 @@ +pytest diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/utilities.go index d4a27b5..adc57bd 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/python/utilities.go @@ -1,12 +1,14 @@ package python import ( - "github.com/pkg/errors" - "github.com/pulumi/pulumi/pkg/v3/codegen" + "fmt" "io" "regexp" "strings" "unicode" + + "github.com/blang/semver" + "github.com/pulumi/pulumi/pkg/v3/codegen" ) // isLegalIdentifierStart returns true if it is legal for c to be the first character of a Python identifier as per @@ -66,7 +68,7 @@ func makeSafeEnumName(name, typeName string) (string, error) { // If the name is one illegal character, return an error. if len(safeName) == 1 && !isLegalIdentifierStart(rune(safeName[0])) { - return "", errors.Errorf("enum name %s is not a valid identifier", safeName) + return "", fmt.Errorf("enum name %s is not a valid identifier", safeName) } // If it's camelCase, change it to snake_case. 
@@ -87,3 +89,99 @@ func makeSafeEnumName(name, typeName string) (string, error) { return safeName, nil } + +var pypiReleaseTranslations = []struct { + prefix string + replacment string +}{ + {"alpha", "a"}, + {"beta", "b"}, +} + +// A valid release tag for pypi +var pypiRelease = regexp.MustCompile("^(a|b|rc)[0-9]+$") + +// A valid dev tag for pypi +var pypiDev = regexp.MustCompile("^dev[0-9]+$") + +// A valid post tag for pypi +var pypiPost = regexp.MustCompile("^post[0-9]+$") + +// pypiVersion translates semver 2.0 into pypi's versioning scheme: +// Details can be found here: https://www.python.org/dev/peps/pep-0440/#version-scheme +// [N!]N(.N)*[{a|b|rc}N][.postN][.devN] +func pypiVersion(v semver.Version) string { + var localList []string + + getRelease := func(maybeRelease string) string { + for _, tup := range pypiReleaseTranslations { + if strings.HasPrefix(maybeRelease, tup.prefix) { + guess := tup.replacment + maybeRelease[len(tup.prefix):] + if pypiRelease.MatchString(guess) { + return guess + } + } + } + if pypiRelease.MatchString(maybeRelease) { + return maybeRelease + } + return "" + } + getDev := func(maybeDev string) string { + if pypiDev.MatchString(maybeDev) { + return "." + maybeDev + } + return "" + } + + getPost := func(maybePost string) string { + if pypiPost.MatchString(maybePost) { + return "." + maybePost + } + return "" + } + + var preListIndex int + + var release string + var dev string + var post string + // We allow the first pre-release in `v` to indicate the release for the + // pypi version. + for _, special := range []struct { + getFunc func(string) string + maybeSet *string + }{ + {getRelease, &release}, + {getDev, &dev}, + {getPost, &post}, + } { + if len(v.Pre) > preListIndex && special.getFunc(v.Pre[preListIndex].VersionStr) != "" { + *special.maybeSet = special.getFunc(v.Pre[preListIndex].VersionStr) + preListIndex++ + } + } + + // All other pre-release segments are added to the local identifier. 
If we + // didn't find a release, the first pre-release is also added to the local + // identifier. + if release != "" { + preListIndex = 1 + } + for ; preListIndex < len(v.Pre); preListIndex++ { + // This can only contain [0-9a-zA-Z-] because semver enforces that set + // and '-' we need only replace '-' with a valid character: '.' + localList = append(localList, strings.ReplaceAll(v.Pre[preListIndex].VersionStr, "-", ".")) + } + // All build flags are added to the local identifier list + for _, b := range v.Build { + // This can only contain [0-9a-zA-Z-] because semver enforces that set + // and '-' we need only replace '-' with a valid character: '.' + localList = append(localList, strings.ReplaceAll(b, "-", ".")) + } + local := "" + if len(localList) > 0 { + local = "+" + strings.Join(localList, ".") + } + return fmt.Sprintf("%d.%d.%d%s%s%s%s", v.Major, v.Minor, v.Patch, release, dev, post, local) +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/loader.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/loader.go index ce4658c..475f71b 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/loader.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/loader.go @@ -1,13 +1,19 @@ package schema import ( + "fmt" + "io" + "io/ioutil" + "os" "sync" + "time" "github.com/blang/semver" jsoniter "github.com/json-iterator/go" - "github.com/pkg/errors" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin" "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) @@ -51,13 +57,69 @@ func (l *pluginLoader) ensurePlugin(pkg string, version *semver.Version) error { Name: pkg, Version: version, } + + tryDownload := func(dst io.WriteCloser) error { + defer dst.Close() + tarball, expectedByteCount, err := pkgPlugin.Download() + if err != nil { + return err + } + defer tarball.Close() + copiedByteCount, err := 
io.Copy(dst, tarball) + if err != nil { + return err + } + if copiedByteCount != expectedByteCount { + return fmt.Errorf("Expected %d bytes but copied %d when downloading plugin %s", + expectedByteCount, copiedByteCount, pkgPlugin) + } + return nil + } + + tryDownloadToFile := func() (string, error) { + file, err := ioutil.TempFile("" /* default temp dir */, "pulumi-plugin-tar") + if err != nil { + return "", err + } + err = tryDownload(file) + if err != nil { + err2 := os.Remove(file.Name()) + if err2 != nil { + return "", fmt.Errorf("Error while removing tempfile: %v. Context: %w", err2, err) + } + return "", err + } + return file.Name(), nil + } + + downloadToFileWithRetry := func() (string, error) { + delay := 80 * time.Millisecond + for attempt := 0; ; attempt++ { + tempFile, err := tryDownloadToFile() + if err == nil { + return tempFile, nil + } + + if err != nil && attempt >= 5 { + return tempFile, err + } + time.Sleep(delay) + delay = delay * 2 + } + } + if !workspace.HasPlugin(pkgPlugin) { - tarball, _, err := pkgPlugin.Download() + tarball, err := downloadToFileWithRetry() if err != nil { - return errors.Wrapf(err, "failed to download plugin: %s", pkgPlugin) + return fmt.Errorf("failed to download plugin: %s: %w", pkgPlugin, err) } - if err := pkgPlugin.Install(tarball); err != nil { - return errors.Wrapf(err, "failed to install plugin %s", pkgPlugin) + defer os.Remove(tarball) + reader, err := os.Open(tarball) + if err != nil { + return fmt.Errorf("failed to open downloaded plugin: %s: %w", pkgPlugin, err) + } + if err := pkgPlugin.Install(reader, false); err != nil { + return fmt.Errorf("failed to install plugin %s: %w", pkgPlugin, err) } } @@ -82,6 +144,7 @@ func (l *pluginLoader) LoadPackage(pkg string, version *semver.Version) (*Packag if err != nil { return nil, err } + contract.Assert(provider != nil) schemaFormatVersion := 0 schemaBytes, err := provider.GetSchema(schemaFormatVersion) @@ -94,10 +157,13 @@ func (l *pluginLoader) LoadPackage(pkg 
string, version *semver.Version) (*Packag return nil, err } - p, err := importSpec(spec, nil, l) + p, diags, err := bindSpec(spec, nil, l, false) if err != nil { return nil, err } + if diags.HasErrors() { + return nil, diags + } l.m.Lock() defer l.m.Unlock() diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/pulumi.json b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/pulumi.json new file mode 100644 index 0000000..ee4fd76 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/pulumi.json @@ -0,0 +1,531 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/pulumi/pulumi/blob/master/pkg/codegen/schema.json", + "title": "Pulumi Package Metaschema", + "description": "A description of the schema for a Pulumi Package", + "type": "object", + "properties": { + "name": { + "description": "The unqualified name of the package (e.g. \"aws\", \"azure\", \"gcp\", \"kubernetes\", \"random\")", + "type": "string", + "pattern": "^[a-zA-Z][-a-zA-Z0-9_]*$" + }, + "displayName": { + "description": "The human-friendly name of the package.", + "type": "string" + }, + "version": { + "description": "The version of the package. The version must be valid semver.", + "type": "string", + "pattern": "^v?(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)(?:-(?P(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + }, + "description": { + "description": "The description of the package. 
Descriptions are interpreted as Markdown.", + "type": "string" + }, + "keywords": { + "description": "The list of keywords that are associated with the package, if any.", + "type": "array", + "items": { + "type": "string" + } + }, + "homepage": { + "description": "The package's homepage.", + "type": "string" + }, + "license": { + "description": "The name of the license used for the package's contents.", + "type": "string" + }, + "attribution": { + "description": "Freeform text attribution of derived work, if required.", + "type": "string" + }, + "repository": { + "description": "The URL at which the package's sources can be found.", + "type": "string" + }, + "logoUrl": { + "description": "The URL of the package's logo, if any.", + "type": "string" + }, + "pluginDownloadUrl": { + "description": "The URL to use when downloading the provider plugin binary.", + "type": "string" + }, + "publisher": { + "description": "The name of the person or organization that authored and published the package.", + "type": "string" + }, + "meta": { + "description": "Format metadata about this package.", + "type": "object", + "properties": { + "moduleFormat": { + "description": "A regex that is used by the importer to extract a module name from the module portion of a type token. Packages that use the module format \"namespace1/namespace2/.../namespaceN\" do not need to specify a format. 
The regex must define one capturing group that contains the module name, which must be formatted as \"namespace1/namespace2/...namespaceN\".", + "type": "string", + "format": "regex" + } + }, + "required": ["moduleFormat"] + }, + "config": { + "description": "The package's configuration variables.", + "type": "object", + "properties": { + "variables": { + "description": "A map from variable name to propertySpec that describes a package's configuration variables.", + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/propertySpec" + } + }, + "required": { + "description": "A list of the names of the package's required configuration variables.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "types": { + "description": "A map from type token to complexTypeSpec that describes the set of complex types (i.e. object, enum) defined by this package.", + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/complexTypeSpec" + }, + "propertyNames": { + "$ref": "#/$defs/token" + } + }, + "provider": { + "description": "The provider type for this package.", + "$ref": "#/$defs/resourceSpec" + }, + "resources": { + "description": "A map from type token to resourceSpec that describes the set of resources and components defined by this package.", + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/resourceSpec" + }, + "propertyNames": { + "$ref": "#/$defs/token" + } + }, + "functions": { + "description": "A map from token to functionSpec that describes the set of functions defined by this package.", + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/functionSpec" + }, + "propertyNames": { + "$ref": "#/$defs/token" + } + }, + "language": { + "description": "Additional language-specific data about the package.", + "type": "object" + } + }, + "additionalProperties": false, + "required": [ + "name" + ], + "$defs": { + "token": { + "title": "Token", + "type": "string", + "$comment": "In the regex below, the 
'module' portion of the token is optional. However, a missing module component creates a '::', which breaks URNs ('::' is the URN delimiter). We have many test schemas that use an empty module component successfully, as they never create URNs; while these are _probably_ the only places that need updating, it might be possible that there are module-less type tokens in the wild elsewhere and we may need to remain compatible with those tokens.", + "pattern": "^[a-zA-Z][-a-zA-Z0-9_]*:([^0-9][a-zA-Z0-9._/-]*)?:[^0-9][a-zA-Z0-9._/]*$" + }, + "typeSpec": { + "title": "Type Reference", + "description": "A reference to a type. The particular kind of type referenced is determined based on the contents of the \"type\" property and the presence or absence of the \"additionalProperties\", \"items\", \"oneOf\", and \"$ref\" properties.", + "type": "object", + "properties": { + "plain": { + "description": "Indicates that when used as an input, this type does not accept eventual values.", + "type": "boolean" + } + }, + "oneOf": [ + { + "title": "Primitive Type", + "description": "A reference to a primitive type. A primitive type must have only the \"type\" property set.", + "type": "object", + "properties": { + "type": { + "description": "The primitive type, if any", + "type": "string", + "enum": ["boolean", "integer", "number", "string"] + }, + "additionalProperties": false, + "items": false, + "oneOf": false, + "$ref": false + }, + "required": ["type"] + }, + { + "title": "Array Type", + "description": "A reference to an array type. The \"type\" property must be set to \"array\" and the \"items\" property must be present. 
No other properties may be present.", + "type": "object", + "$comment": "An array type must have the \"type\" property set.", + "properties": { + "type": { + "const": "array" + }, + "items": { + "description": "The element type of the array", + "$ref": "#/$defs/typeSpec" + }, + "additionalProperties": false, + "oneOf": false, + "$ref": false + }, + "required": ["type", "items"] + }, + { + "title": "Map Type", + "description": "A reference to a map type. The \"type\" property must be set to \"object\" and the \"additionalProperties\" property may be present. No other properties may be present.", + "type": "object", + "properties": { + "type": { + "const": "object" + }, + "additionalProperties": { + "description": "The element type of the map. Defaults to \"string\" when omitted.", + "$ref": "#/$defs/typeSpec" + }, + "items": false, + "oneOf": false, + "$ref": false + }, + "required": ["type"] + }, + { + "title": "Named Type", + "description": "A reference to a type in this or another document. The \"$ref\" property must be present. The \"type\" property is ignored if it is present. No other properties may be present.", + "type": "object", + "properties": { + "type": { + "description": "ignored; present for compatibility with existing schemas", + "type": "string" + }, + "$ref": { + "description": "The URI of the referenced type. 
For example, the built-in Archive, Asset, and Any\ntypes are referenced as \"pulumi.json#/Archive\", \"pulumi.json#/Asset\", and \"pulumi.json#/Any\", respectively.\nA type from this document is referenced as \"#/types/pulumi:type:token\".\nA type from another document is referenced as \"path#/types/pulumi:type:token\", where path is of the form:\n \"/provider/vX.Y.Z/schema.json\" or \"pulumi.json\" or \"http[s]://example.com/provider/vX.Y.Z/schema.json\"\nA resource from this document is referenced as \"#/resources/pulumi:type:token\".\nA resource from another document is referenced as \"path#/resources/pulumi:type:token\", where path is of the form:\n \"/provider/vX.Y.Z/schema.json\" or \"pulumi.json\" or \"http[s]://example.com/provider/vX.Y.Z/schema.json\"", + "type": "string", + "format": "uri-reference" + }, + "additionalProperties": false, + "items": false, + "oneOf": false + }, + "required": ["$ref"] + }, + { + "title": "Union Type", + "description": "A reference to a union type. The \"oneOf\" property must be present. The union may additional specify an underlying primitive type via the \"type\" property and a discriminator via the \"discriminator\" property. 
No other properties may be present.", + "type": "object", + "properties": { + "type": { + "description": "The underlying primitive type of the union, if any", + "type": "string", + "enum": ["boolean", "integer", "number", "string"] + }, + "oneOf": { + "description": "If present, indicates that values of the type may be one of any of the listed types", + "type": "array", + "items": { + "$ref": "#/$defs/typeSpec" + }, + "minItems": 2 + }, + "discriminator": { + "description": "Informs the consumer of an alternative schema based on the value associated with it", + "type": "object", + "properties": { + "propertyName": { + "description": "PropertyName is the name of the property in the payload that will hold the discriminator value", + "type": "string", + "minLength": 1 + }, + "mapping": { + "description": "an optional object to hold mappings between payload values and schema names or references", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "propertyName" + ] + }, + "additionalProperties": false, + "items": false, + "$ref": false + }, + "required": ["oneOf"] + } + ] + }, + "propertySpec": { + "title": "Property Definition", + "description": "Describes an object or resource property", + "type": "object", + "allOf": [ + { "$ref": "#/$defs/typeSpec" } + ], + "properties": { + "description": { + "description": "The description of the property, if any. Interpreted as Markdown.", + "type": "string" + }, + "const": { + "description": "The constant value for the property, if any. The type of the value must be assignable to the type of the property.", + "type": ["boolean", "number", "string"] + }, + "default": { + "description": "The default value for the property, if any. 
The type of the value must be assignable to the type of the property.", + "type": ["boolean", "number", "string"] + }, + "defaultInfo": { + "description": "Additional information about the property's default value, if any.", + "type": "object", + "properties": { + "environment": { + "description": "A set of environment variables to probe for a default value.", + "type": "array", + "items": { + "type": "string" + } + }, + "language": { + "description": "Additional language-specific data about the default value.", + "type": "object" + } + }, + "required": ["environment"] + }, + "deprecationMessage": { + "description": "Indicates whether the property is deprecated", + "type": "string" + }, + "language": { + "description": "Additional language-specific data about the property.", + "type": "object" + }, + "secret": { + "description": "Specifies whether the property is secret (default false).", + "type": "boolean" + }, + "replaceOnChanges": { + "description": "Specifies whether a change to the property causes its containing resource to be replaced instead of updated (default false).", + "type": "boolean" + } + } + }, + "complexTypeSpec": { + "title": "Type Definition", + "description": "Describes an object or enum type.", + "type": "object", + "properties": { + "description": { + "description": "The description of the type, if any. 
Interpreted as Markdown.", + "type": "string" + }, + "language": { + "description": "Additional language-specific data about the type.", + "type": "object" + }, + "isOverlay": { + "description": "Indicates that the implementation of the type should not be generated from the schema, and is instead provided out-of-band by the package author", + "type": "boolean" + } + }, + "oneOf": [ + { + "title": "Object Type Definition", + "type": "object", + "allOf": [ + { "$ref": "#/$defs/objectTypeSpec" } + ], + "properties": { + "type": { + "const": "object" + } + } + }, + { "$ref": "#/$defs/enumTypeSpec" } + ] + }, + "objectTypeSpec": { + "title": "Object Type Details", + "description": "Describes an object type", + "type": "object", + "properties": { + "properties": { + "description": "A map from property name to propertySpec that describes the object's properties.", + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/propertySpec" + } + }, + "required": { + "description": "A list of the names of an object type's required properties. These properties must be set for inputs and will always be set for outputs.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "enumTypeSpec": { + "title": "Enum Type Definition", + "description": "Describes an enum type", + "type": "object", + "properties" :{ + "type": { + "description": "The underlying primitive type of the enum", + "type": "string", + "enum": ["boolean", "integer", "number", "string"] + }, + "enum": { + "description": "The list of possible values for the enum", + "type": "array", + "items": { + "title": "Enum Value Definition", + "type": "object", + "properties": { + "name": { + "description": "If present, overrides the name of the enum value that would usually be derived from the value.", + "type": "string" + }, + "description": { + "description": "The description of the enum value, if any. 
Interpreted as Markdown.", + "type": "string" + }, + "value": { + "description": "The enum value itself", + "type": ["boolean", "integer", "number", "string"] + }, + "deprecationMessage": { + "description": "Indicates whether the value is deprecated.", + "type": "string" + } + }, + "required": ["value"] + } + } + }, + "required": ["type", "enum"] + }, + "resourceSpec": { + "title": "Resource Definition", + "description": "Describes a resource or component.", + "type": "object", + "allOf": [ + { "$ref": "#/$defs/objectTypeSpec" } + ], + "properties": { + "description": { + "description": "The description of the resource, if any. Interpreted as Markdown.", + "type": "string" + }, + "inputProperties": { + "description": "A map from property name to propertySpec that describes the resource's input properties.", + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/propertySpec" + } + }, + "requiredInputs": { + "description": "A list of the names of the resource's required input properties.", + "type": "array", + "items": { + "type": "string" + } + }, + "stateInputs": { + "description": "An optional objectTypeSpec that describes additional inputs that mau be necessary to get an existing resource. 
If this is unset, only an ID is necessary.", + "$ref": "#/$defs/objectTypeSpec" + }, + "aliases": { + "description": "The list of aliases for the resource.", + "type": "array", + "items": { + "title": "Alias Definition", + "type": "object", + "properties": { + "name": { + "description": "The name portion of the alias, if any", + "type": "string" + }, + "project": { + "description": "The project portion of the alias, if any", + "type": "string" + }, + "type": { + "description": "The type portion of the alias, if any", + "type": "string" + } + } + } + }, + "deprecationMessage": { + "description": "Indicates whether the resource is deprecated", + "type": "string" + }, + "isComponent": { + "description": "Indicates whether the resource is a component.", + "type": "boolean" + }, + "methods": { + "description": "A map from method name to function token that describes the resource's method set.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "isOverlay": { + "description": "Indicates that the implementation of the resource should not be generated from the schema, and is instead provided out-of-band by the package author", + "type": "boolean" + } + } + }, + "functionSpec": { + "title": "Function Definition", + "description": "Describes a function.", + "type": "object", + "properties": { + "description": { + "description": "The description of the function, if any. 
Interpreted as Markdown.", + "type": "string" + }, + "inputs": { + "description": "The bag of input values for the function, if any.", + "$ref": "#/$defs/objectTypeSpec" + }, + "outputs": { + "description": "The bag of output values for the function, if any.", + "$ref": "#/$defs/objectTypeSpec" + }, + "deprecationMessage": { + "description": "Indicates whether the function is deprecated", + "type": "string" + }, + "language": { + "description": "Additional language-specific data about the function.", + "type": "object" + }, + "isOverlay": { + "description": "Indicates that the implementation of the function should not be generated from the schema, and is instead provided out-of-band by the package author", + "type": "boolean" + } + } + } + } +} diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/schema.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/schema.go index 96106b7..85c278e 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/schema.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/schema/schema.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,8 +15,11 @@ package schema import ( + "bytes" + _ "embed" "encoding/json" "fmt" + "io" "math" "net/url" "os" @@ -26,7 +29,11 @@ import ( "strings" "github.com/blang/semver" - "github.com/pkg/errors" + "github.com/hashicorp/hcl/v2" + + "github.com/santhosh-tekuri/jsonschema/v5" + "gopkg.in/yaml.v3" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) @@ -84,7 +91,7 @@ func (primitiveType) isType() {} // IsPrimitiveType returns true if the given Type is a primitive type. The primitive types are bool, int, number, // string, archive, asset, and any. 
func IsPrimitiveType(t Type) bool { - _, ok := t.(primitiveType) + _, ok := plainType(t).(primitiveType) return ok } @@ -107,6 +114,22 @@ var ( AnyType Type = anyType ) +// An InvalidType represents an invalid type with associated diagnostics. +type InvalidType struct { + Diagnostics hcl.Diagnostics +} + +func (t *InvalidType) String() string { + return "Invalid" +} + +func (*InvalidType) isType() {} + +func invalidType(diags ...*hcl.Diagnostic) (Type, hcl.Diagnostics) { + t := &InvalidType{Diagnostics: hcl.Diagnostics(diags)} + return t, hcl.Diagnostics(diags) +} + // MapType represents maps from strings to particular element types. type MapType struct { // ElementType is the element type of the map. @@ -133,6 +156,8 @@ func (*ArrayType) isType() {} // EnumType represents an enum. type EnumType struct { + // Package is the type's package. + Package *Package // Token is the type's Pulumi type token. Token string // Comment is the description of the type, if any. @@ -141,6 +166,10 @@ type EnumType struct { Elements []*Enum // ElementType is the underlying type for the enum. ElementType Type + + // IsOverlay indicates whether the type is an overlay provided by the package. Overlay code is generated by the + // package rather than using the core Pulumi codegen libraries. + IsOverlay bool } // Enum contains information about an enum. @@ -203,10 +232,32 @@ type ObjectType struct { Properties []*Property // Language specifies additional language-specific data about the object type. Language map[string]interface{} + // IsOverlay indicates whether the type is an overlay provided by the package. Overlay code is generated by the + // package rather than using the core Pulumi codegen libraries. + IsOverlay bool + + // InputShape is the input shape for this object. Only valid if IsPlainShape returns true. + InputShape *ObjectType + // PlainShape is the plain shape for this object. Only valid if IsInputShape returns true. 
+ PlainShape *ObjectType properties map[string]*Property } +// IsPlainShape returns true if this object type is the plain shape of a (plain, input) +// pair. The plain shape of an object does not contain *InputType values and only +// references other plain shapes. +func (t *ObjectType) IsPlainShape() bool { + return t.PlainShape == nil +} + +// IsInputShape returns true if this object type is the plain shape of a (plain, input) +// pair. The input shape of an object may contain *InputType values and may +// reference other input shapes. +func (t *ObjectType) IsInputShape() bool { + return t.PlainShape != nil +} + func (t *ObjectType) Property(name string) (*Property, bool) { if t.properties == nil && len(t.Properties) > 0 { t.properties = make(map[string]*Property) @@ -219,6 +270,9 @@ func (t *ObjectType) Property(name string) (*Property, bool) { } func (t *ObjectType) String() string { + if t.PlainShape != nil { + return t.Token + "•Input" + } return t.Token } @@ -252,6 +306,30 @@ func (t *TokenType) String() string { func (*TokenType) isType() {} +// InputType represents a type that accepts either a prompt value or an output value. +type InputType struct { + // ElementType is the element type of the input. + ElementType Type +} + +func (t *InputType) String() string { + return fmt.Sprintf("Input<%v>", t.ElementType) +} + +func (*InputType) isType() {} + +// OptionalType represents a type that accepts an optional value. +type OptionalType struct { + // ElementType is the element type of the input. + ElementType Type +} + +func (t *OptionalType) String() string { + return fmt.Sprintf("Optional<%v>", t.ElementType) +} + +func (*OptionalType) isType() {} + // DefaultValue describes a default value for a property. type DefaultValue struct { // Value specifies a static default value, if any. 
This value must be representable in the Pulumi schema type @@ -275,16 +353,20 @@ type Property struct { ConstValue interface{} // DefaultValue is the default value for the property, if any. DefaultValue *DefaultValue - // IsRequired is true if the property must always be populated. - IsRequired bool - // IsPlain is true if the property only accepts prompt values. - IsPlain bool // DeprecationMessage indicates whether or not the property is deprecated. DeprecationMessage string // Language specifies additional language-specific data about the property. Language map[string]interface{} // Secret is true if the property is secret (default false). Secret bool + // ReplaceOnChanges specifies if the property is to be replaced instead of updated (default false). + ReplaceOnChanges bool +} + +// IsRequired returns true if this property is required (i.e. its type is not Optional). +func (p *Property) IsRequired() bool { + _, optional := p.Type.(*OptionalType) + return !optional } // Alias describes an alias for a Pulumi resource. @@ -321,6 +403,126 @@ type Resource struct { Language map[string]interface{} // IsComponent indicates whether the resource is a ComponentResource. IsComponent bool + // Methods is the list of methods for the resource. + Methods []*Method + // IsOverlay indicates whether the type is an overlay provided by the package. Overlay code is generated by the + // package rather than using the core Pulumi codegen libraries. + IsOverlay bool +} + +// The set of resource paths where ReplaceOnChanges is true. 
+// +// For example, if you have the following resource struct: +// +// Resource A { +// Properties: { +// Object B { +// Object D: { +// ReplaceOnChanges: true +// } +// Object F: {} +// } +// Object C { +// ReplaceOnChanges: true +// } +// } +// } +// +// A.ReplaceOnChanges() == [[B, D], [C]] +func (r *Resource) ReplaceOnChanges() (changes [][]*Property, err []error) { + for _, p := range r.Properties { + if p.ReplaceOnChanges { + changes = append(changes, []*Property{p}) + } else { + stack := map[string]struct{}{p.Type.String(): {}} + childChanges, errList := replaceOnChangesType(p.Type, &stack) + err = append(err, errList...) + + for _, c := range childChanges { + changes = append(changes, append([]*Property{p}, c...)) + } + } + } + for i, e := range err { + err[i] = fmt.Errorf("Failed to generate full `ReplaceOnChanges`: %w", e) + } + return changes, err +} + +func replaceOnChangesType(t Type, stack *map[string]struct{}) ([][]*Property, []error) { + var errTmp []error + if o, ok := t.(*OptionalType); ok { + return replaceOnChangesType(o.ElementType, stack) + } else if o, ok := t.(*ObjectType); ok { + changes := [][]*Property{} + err := []error{} + for _, p := range o.Properties { + if p.ReplaceOnChanges { + changes = append(changes, []*Property{p}) + } else if _, ok := (*stack)[p.Type.String()]; !ok { + // We handle recursive objects + (*stack)[p.Type.String()] = struct{}{} + var object [][]*Property + object, errTmp = replaceOnChangesType(p.Type, stack) + err = append(err, errTmp...) + for _, path := range object { + changes = append(changes, append([]*Property{p}, path...)) + } + + delete(*stack, p.Type.String()) + } else { + err = append(err, fmt.Errorf("Found recursive object %q", p.Name)) + } + } + // We don't want to emit errors where replaceOnChanges is not used. + if len(changes) == 0 { + return nil, nil + } + return changes, err + } else if a, ok := t.(*ArrayType); ok { + // This looks for types internal to the array, not a property of the array. 
+ return replaceOnChangesType(a.ElementType, stack) + } else if m, ok := t.(*MapType); ok { + // This looks for types internal to the map, not a property of the map. + return replaceOnChangesType(m.ElementType, stack) + } + return nil, nil +} + +// Joins the output of `ReplaceOnChanges` into property path names. +// +// For example, given an input [[B, D], [C]] where each property has a name +// equivalent to its variable, this function should yield: ["B.D", "C"] +func PropertyListJoinToString(propertyList [][]*Property, nameConverter func(string) string) []string { + var nonOptional func(Type) Type + nonOptional = func(t Type) Type { + if o, ok := t.(*OptionalType); ok { + return nonOptional(o.ElementType) + } + return t + } + out := make([]string, len(propertyList)) + for i, p := range propertyList { + names := make([]string, len(p)) + for j, n := range p { + if _, ok := nonOptional(n.Type).(*ArrayType); ok { + names[j] = nameConverter(n.Name) + "[*]" + } else if _, ok := nonOptional(n.Type).(*MapType); ok { + names[j] = nameConverter(n.Name) + ".*" + } else { + names[j] = nameConverter(n.Name) + } + } + out[i] = strings.Join(names, ".") + } + return out +} + +type Method struct { + // Name is the name of the method. + Name string + // Function is the function definition for the method. + Function *Function } // Function describes a Pulumi function. @@ -339,6 +541,11 @@ type Function struct { DeprecationMessage string // Language specifies additional language-specific data about the function. Language map[string]interface{} + // IsMethod indicates whether the function is a method of a resource. + IsMethod bool + // IsOverlay indicates whether the function is an overlay provided by the package. Overlay code is generated by the + // package rather than using the core Pulumi codegen libraries. + IsOverlay bool } // Package describes a Pulumi package. @@ -347,11 +554,19 @@ type Package struct { // Name is the unqualified name of the package (e.g. 
"aws", "azure", "gcp", "kubernetes". "random") Name string + // DisplayName is the human-friendly name of the package. + DisplayName string // Version is the version of the package. Version *semver.Version // Description is the description of the package. Description string // Keywords is the list of keywords that are associated with the package, if any. + // Some reserved keywords can be specified as well that help with categorizing the + // package in the Pulumi registry. `category/` and `kind/` are the only + // reserved keywords at this time, where `` can be one of: + // `cloud`, `database`, `infrastructure`, `monitoring`, `network`, `utility`, `vcs` + // and `` is either `native` or `component`. If the package is a bridged Terraform + // provider, then don't include the `kind/` label. Keywords []string // Homepage is the package's homepage. Homepage string @@ -365,6 +580,8 @@ type Package struct { LogoURL string // PluginDownloadURL is the URL to use to acquire the provider plugin binary, if any. PluginDownloadURL string + // Publisher is the name of the person or organization that authored and published the package. + Publisher string // Types is the list of non-resource types defined by the package. Types []Type @@ -383,6 +600,8 @@ type Package struct { resourceTypeTable map[string]*ResourceType functionTable map[string]*Function typeTable map[string]Type + + importedLanguages map[string]struct{} } // Language provides hooks for importing language-specific metadata in a package. 
@@ -417,7 +636,7 @@ func importDefaultLanguages(def *DefaultValue, languages map[string]Language) er if lang, ok := languages[name]; ok { val, err := lang.ImportDefaultSpec(def, raw) if err != nil { - return errors.Wrapf(err, "importing %v metadata", name) + return fmt.Errorf("importing %v metadata: %w", name, err) } def.Language[name] = val } @@ -429,7 +648,7 @@ func importDefaultLanguages(def *DefaultValue, languages map[string]Language) er func importPropertyLanguages(property *Property, languages map[string]Language) error { if property.DefaultValue != nil { if err := importDefaultLanguages(property.DefaultValue, languages); err != nil { - return errors.Wrapf(err, "importing default value") + return fmt.Errorf("importing default value: %w", err) } } @@ -439,7 +658,7 @@ func importPropertyLanguages(property *Property, languages map[string]Language) if lang, ok := languages[name]; ok { val, err := lang.ImportPropertySpec(property, raw) if err != nil { - return errors.Wrapf(err, "importing %v metadata", name) + return fmt.Errorf("importing %v metadata: %w", name, err) } property.Language[name] = val } @@ -451,7 +670,7 @@ func importPropertyLanguages(property *Property, languages map[string]Language) func importObjectTypeLanguages(object *ObjectType, languages map[string]Language) error { for _, property := range object.Properties { if err := importPropertyLanguages(property, languages); err != nil { - return errors.Wrapf(err, "importing property %v", property.Name) + return fmt.Errorf("importing property %v: %w", property.Name, err) } } @@ -461,7 +680,7 @@ func importObjectTypeLanguages(object *ObjectType, languages map[string]Language if lang, ok := languages[name]; ok { val, err := lang.ImportObjectTypeSpec(object, raw) if err != nil { - return errors.Wrapf(err, "importing %v metadata", name) + return fmt.Errorf("importing %v metadata: %w", name, err) } object.Language[name] = val } @@ -473,20 +692,20 @@ func importObjectTypeLanguages(object *ObjectType, 
languages map[string]Language func importResourceLanguages(resource *Resource, languages map[string]Language) error { for _, property := range resource.InputProperties { if err := importPropertyLanguages(property, languages); err != nil { - return errors.Wrapf(err, "importing input property %v", property.Name) + return fmt.Errorf("importing input property %v: %w", property.Name, err) } } for _, property := range resource.Properties { if err := importPropertyLanguages(property, languages); err != nil { - return errors.Wrapf(err, "importing property %v", property.Name) + return fmt.Errorf("importing property %v: %w", property.Name, err) } } if resource.StateInputs != nil { for _, property := range resource.StateInputs.Properties { if err := importPropertyLanguages(property, languages); err != nil { - return errors.Wrapf(err, "importing state input property %v", property.Name) + return fmt.Errorf("importing state input property %v: %w", property.Name, err) } } } @@ -497,7 +716,7 @@ func importResourceLanguages(resource *Resource, languages map[string]Language) if lang, ok := languages[name]; ok { val, err := lang.ImportResourceSpec(resource, raw) if err != nil { - return errors.Wrapf(err, "importing %v metadata", name) + return fmt.Errorf("importing %v metadata: %w", name, err) } resource.Language[name] = val } @@ -509,12 +728,12 @@ func importResourceLanguages(resource *Resource, languages map[string]Language) func importFunctionLanguages(function *Function, languages map[string]Language) error { if function.Inputs != nil { if err := importObjectTypeLanguages(function.Inputs, languages); err != nil { - return errors.Wrapf(err, "importing inputs") + return fmt.Errorf("importing inputs: %w", err) } } if function.Outputs != nil { if err := importObjectTypeLanguages(function.Outputs, languages); err != nil { - return errors.Wrapf(err, "importing outputs") + return fmt.Errorf("importing outputs: %w", err) } } @@ -524,7 +743,7 @@ func importFunctionLanguages(function 
*Function, languages map[string]Language) if lang, ok := languages[name]; ok { val, err := lang.ImportFunctionSpec(function, raw) if err != nil { - return errors.Wrapf(err, "importing %v metadata", name) + return fmt.Errorf("importing %v metadata: %w", name, err) } function.Language[name] = val } @@ -534,39 +753,50 @@ func importFunctionLanguages(function *Function, languages map[string]Language) } func (pkg *Package) ImportLanguages(languages map[string]Language) error { - if len(languages) == 0 { + if pkg.importedLanguages == nil { + pkg.importedLanguages = map[string]struct{}{} + } + + any := false + for lang := range languages { + if _, ok := pkg.importedLanguages[lang]; !ok { + any = true + break + } + } + if !any { return nil } for _, t := range pkg.Types { if object, ok := t.(*ObjectType); ok { if err := importObjectTypeLanguages(object, languages); err != nil { - return errors.Wrapf(err, "importing object type %v", object.Token) + return fmt.Errorf("importing object type %v: %w", object.Token, err) } } } for _, config := range pkg.Config { if err := importPropertyLanguages(config, languages); err != nil { - return errors.Wrapf(err, "importing configuration property %v", config.Name) + return fmt.Errorf("importing configuration property %v: %w", config.Name, err) } } if pkg.Provider != nil { if err := importResourceLanguages(pkg.Provider, languages); err != nil { - return errors.Wrapf(err, "importing provider") + return fmt.Errorf("importing provider: %w", err) } } for _, resource := range pkg.Resources { if err := importResourceLanguages(resource, languages); err != nil { - return errors.Wrapf(err, "importing resource %v", resource.Token) + return fmt.Errorf("importing resource %v: %w", resource.Token, err) } } for _, function := range pkg.Functions { if err := importFunctionLanguages(function, languages); err != nil { - return errors.Wrapf(err, "importing function %v", function.Token) + return fmt.Errorf("importing function %v: %w", function.Token, err) } 
} @@ -576,13 +806,17 @@ func (pkg *Package) ImportLanguages(languages map[string]Language) error { if lang, ok := languages[name]; ok { val, err := lang.ImportPackageSpec(pkg, raw) if err != nil { - return errors.Wrapf(err, "importing %v metadata", name) + return fmt.Errorf("importing %v metadata: %w", name, err) } pkg.Language[name] = val } } } + for lang := range languages { + pkg.importedLanguages[lang] = struct{}{} + } + return nil } @@ -598,6 +832,10 @@ func (pkg *Package) TokenToModule(tok string) string { case "providers": return "" default: + if pkg.moduleFormat == nil { + pkg.moduleFormat = defaultModuleFormat + } + matches := pkg.moduleFormat.FindStringSubmatch(components[1]) if len(matches) < 2 || strings.HasPrefix(matches[1], "index") { return "" @@ -636,11 +874,450 @@ func (pkg *Package) GetType(token string) (Type, bool) { return t, ok } +func (pkg *Package) MarshalSpec() (spec *PackageSpec, err error) { + version := "" + if pkg.Version != nil { + version = pkg.Version.String() + } + + var metadata *MetadataSpec + if pkg.moduleFormat != nil { + metadata = &MetadataSpec{ModuleFormat: pkg.moduleFormat.String()} + } + + spec = &PackageSpec{ + Name: pkg.Name, + Version: version, + Description: pkg.Description, + Keywords: pkg.Keywords, + Homepage: pkg.Homepage, + License: pkg.License, + Attribution: pkg.Attribution, + Repository: pkg.Repository, + LogoURL: pkg.LogoURL, + PluginDownloadURL: pkg.PluginDownloadURL, + Meta: metadata, + Types: map[string]ComplexTypeSpec{}, + Resources: map[string]ResourceSpec{}, + Functions: map[string]FunctionSpec{}, + } + + spec.Config.Required, spec.Config.Variables, err = pkg.marshalProperties(pkg.Config, true) + if err != nil { + return nil, fmt.Errorf("marshaling package config: %w", err) + } + + spec.Provider, err = pkg.marshalResource(pkg.Provider) + if err != nil { + return nil, fmt.Errorf("marshaling provider: %w", err) + } + + for _, t := range pkg.Types { + switch t := t.(type) { + case *ObjectType: + if 
t.IsInputShape() { + continue + } + + // Use the input shape when marshaling in order to get the plain annotations right. + o, err := pkg.marshalObject(t.InputShape, false) + if err != nil { + return nil, fmt.Errorf("marshaling type '%v': %w", t.Token, err) + } + spec.Types[t.Token] = o + case *EnumType: + spec.Types[t.Token] = pkg.marshalEnum(t) + } + } + + for _, res := range pkg.Resources { + r, err := pkg.marshalResource(res) + if err != nil { + return nil, fmt.Errorf("marshaling resource '%v': %w", res.Token, err) + } + spec.Resources[res.Token] = r + } + + for _, fn := range pkg.Functions { + f, err := pkg.marshalFunction(fn) + if err != nil { + return nil, fmt.Errorf("marshaling function '%v': %w", fn.Token, err) + } + spec.Functions[fn.Token] = f + } + + return spec, nil +} + +func (pkg *Package) MarshalJSON() ([]byte, error) { + spec, err := pkg.MarshalSpec() + if err != nil { + return nil, err + } + return jsonMarshal(spec) +} + +func (pkg *Package) MarshalYAML() ([]byte, error) { + spec, err := pkg.MarshalSpec() + if err != nil { + return nil, err + } + + var b bytes.Buffer + enc := yaml.NewEncoder(&b) + enc.SetIndent(2) + if err := enc.Encode(spec); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func (pkg *Package) marshalObjectData(comment string, properties []*Property, language map[string]interface{}, + plain, isOverlay bool) (ObjectTypeSpec, error) { + + required, props, err := pkg.marshalProperties(properties, plain) + if err != nil { + return ObjectTypeSpec{}, err + } + + lang, err := marshalLanguage(language) + if err != nil { + return ObjectTypeSpec{}, err + } + + return ObjectTypeSpec{ + Description: comment, + Properties: props, + Type: "object", + Required: required, + Language: lang, + IsOverlay: isOverlay, + }, nil +} + +func (pkg *Package) marshalObject(t *ObjectType, plain bool) (ComplexTypeSpec, error) { + data, err := pkg.marshalObjectData(t.Comment, t.Properties, t.Language, plain, t.IsOverlay) + if err != nil { + 
return ComplexTypeSpec{}, err + } + return ComplexTypeSpec{ObjectTypeSpec: data}, nil +} + +func (pkg *Package) marshalEnum(t *EnumType) ComplexTypeSpec { + values := make([]EnumValueSpec, len(t.Elements)) + for i, el := range t.Elements { + values[i] = EnumValueSpec{ + Name: el.Name, + Description: el.Comment, + Value: el.Value, + DeprecationMessage: el.DeprecationMessage, + } + } + + return ComplexTypeSpec{ + ObjectTypeSpec: ObjectTypeSpec{Type: pkg.marshalType(t.ElementType, false).Type, IsOverlay: t.IsOverlay}, + Enum: values, + } +} + +func (pkg *Package) marshalResource(r *Resource) (ResourceSpec, error) { + object, err := pkg.marshalObjectData(r.Comment, r.Properties, r.Language, true, r.IsOverlay) + if err != nil { + return ResourceSpec{}, fmt.Errorf("marshaling properties: %w", err) + } + + requiredInputs, inputs, err := pkg.marshalProperties(r.InputProperties, false) + if err != nil { + return ResourceSpec{}, fmt.Errorf("marshaling input properties: %w", err) + } + + var stateInputs *ObjectTypeSpec + if r.StateInputs != nil { + o, err := pkg.marshalObject(r.StateInputs, false) + if err != nil { + return ResourceSpec{}, fmt.Errorf("marshaling state inputs: %w", err) + } + stateInputs = &o.ObjectTypeSpec + } + + var aliases []AliasSpec + for _, a := range r.Aliases { + aliases = append(aliases, AliasSpec{ + Name: a.Name, + Project: a.Project, + Type: a.Type, + }) + } + + var methods map[string]string + if len(r.Methods) != 0 { + methods = map[string]string{} + for _, m := range r.Methods { + methods[m.Name] = m.Function.Token + } + } + + return ResourceSpec{ + ObjectTypeSpec: object, + InputProperties: inputs, + RequiredInputs: requiredInputs, + StateInputs: stateInputs, + Aliases: aliases, + DeprecationMessage: r.DeprecationMessage, + IsComponent: r.IsComponent, + Methods: methods, + }, nil +} + +func (pkg *Package) marshalFunction(f *Function) (FunctionSpec, error) { + var inputs *ObjectTypeSpec + if f.Inputs != nil { + ins, err := 
pkg.marshalObject(f.Inputs, true) + if err != nil { + return FunctionSpec{}, fmt.Errorf("marshaling inputs: %w", err) + } + inputs = &ins.ObjectTypeSpec + } + + var outputs *ObjectTypeSpec + if f.Outputs != nil { + outs, err := pkg.marshalObject(f.Outputs, true) + if err != nil { + return FunctionSpec{}, fmt.Errorf("marshaling outputs: %w", err) + } + outputs = &outs.ObjectTypeSpec + } + + lang, err := marshalLanguage(f.Language) + if err != nil { + return FunctionSpec{}, err + } + + return FunctionSpec{ + Description: f.Comment, + Inputs: inputs, + Outputs: outputs, + Language: lang, + }, nil +} + +func (pkg *Package) marshalProperties(props []*Property, plain bool) (required []string, specs map[string]PropertySpec, + err error) { + + if len(props) == 0 { + return + } + + specs = make(map[string]PropertySpec, len(props)) + for _, p := range props { + typ := p.Type + if t, optional := typ.(*OptionalType); optional { + typ = t.ElementType + } else { + required = append(required, p.Name) + } + + var defaultValue interface{} + var defaultSpec *DefaultSpec + if p.DefaultValue != nil { + defaultValue = p.DefaultValue.Value + if len(p.DefaultValue.Environment) != 0 || len(p.DefaultValue.Language) != 0 { + lang, err := marshalLanguage(p.DefaultValue.Language) + if err != nil { + return nil, nil, fmt.Errorf("property '%v': %w", p.Name, err) + } + + defaultSpec = &DefaultSpec{ + Environment: p.DefaultValue.Environment, + Language: lang, + } + } + } + + lang, err := marshalLanguage(p.Language) + if err != nil { + return nil, nil, fmt.Errorf("property '%v': %w", p.Name, err) + } + + specs[p.Name] = PropertySpec{ + TypeSpec: pkg.marshalType(typ, plain), + Description: p.Comment, + Const: p.ConstValue, + Default: defaultValue, + DefaultInfo: defaultSpec, + DeprecationMessage: p.DeprecationMessage, + Language: lang, + Secret: p.Secret, + } + } + return required, specs, nil +} + +// marshalType marshals the given type into a TypeSpec. 
If plain is true, then the type is being marshaled within a +// plain type context (e.g. a resource output property or a function input/output object type), and therefore does not +// require `Plain` annotations (hence the odd-looking `Plain: !plain` fields below). +func (pkg *Package) marshalType(t Type, plain bool) TypeSpec { + switch t := t.(type) { + case *InputType: + el := pkg.marshalType(t.ElementType, plain) + el.Plain = false + return el + case *ArrayType: + el := pkg.marshalType(t.ElementType, plain) + return TypeSpec{ + Type: "array", + Items: &el, + Plain: !plain, + } + case *MapType: + el := pkg.marshalType(t.ElementType, plain) + return TypeSpec{ + Type: "object", + AdditionalProperties: &el, + Plain: !plain, + } + case *UnionType: + oneOf := make([]TypeSpec, len(t.ElementTypes)) + for i, el := range t.ElementTypes { + oneOf[i] = pkg.marshalType(el, plain) + } + + defaultType := "" + if t.DefaultType != nil { + defaultType = pkg.marshalType(t.DefaultType, plain).Type + } + + var discriminator *DiscriminatorSpec + if t.Discriminator != "" { + discriminator = &DiscriminatorSpec{ + PropertyName: t.Discriminator, + Mapping: t.Mapping, + } + } + + return TypeSpec{ + Type: defaultType, + OneOf: oneOf, + Discriminator: discriminator, + Plain: !plain, + } + case *ObjectType: + return TypeSpec{Ref: pkg.marshalTypeRef(t.Package, "types", t.Token)} + case *EnumType: + return TypeSpec{Ref: pkg.marshalTypeRef(t.Package, "types", t.Token)} + case *ResourceType: + return TypeSpec{Ref: pkg.marshalTypeRef(t.Resource.Package, "resources", t.Token)} + case *TokenType: + var defaultType string + if t.UnderlyingType != nil { + defaultType = pkg.marshalType(t.UnderlyingType, plain).Type + } + + return TypeSpec{ + Type: defaultType, + Ref: t.Token, + } + default: + switch t { + case BoolType: + return TypeSpec{Type: "boolean"} + case StringType: + return TypeSpec{Type: "string"} + case IntType: + return TypeSpec{Type: "integer"} + case NumberType: + return TypeSpec{Type: 
"number"} + case AnyType: + return TypeSpec{Ref: "pulumi.json#/Any"} + case ArchiveType: + return TypeSpec{Ref: "pulumi.json#/Archive"} + case AssetType: + return TypeSpec{Ref: "pulumi.json#/Asset"} + case JSONType: + return TypeSpec{Ref: "pulumi.json#/Json"} + default: + panic(fmt.Errorf("unexepcted type %v (%T)", t, t)) + } + } +} + +func (pkg *Package) marshalTypeRef(container *Package, section, token string) string { + token = url.PathEscape(token) + + if container == pkg { + return fmt.Sprintf("#/%s/%s", section, token) + } + + // TODO(schema): this isn't quite right--it doesn't handle schemas sourced from URLs--but it's good enough for now. + return fmt.Sprintf("/%s/%v/schema.json#/%s/%s", container.Name, container.Version, section, token) +} + +func marshalLanguage(lang map[string]interface{}) (map[string]RawMessage, error) { + if len(lang) == 0 { + return nil, nil + } + + result := map[string]RawMessage{} + for name, data := range lang { + bytes, err := jsonMarshal(data) + if err != nil { + return nil, fmt.Errorf("marshaling %v language data: %w", name, err) + } + result[name] = RawMessage(bytes) + } + return result, nil +} + +type RawMessage []byte + +func (m RawMessage) MarshalJSON() ([]byte, error) { + return []byte(m), nil +} + +func (m *RawMessage) UnmarshalJSON(bytes []byte) error { + *m = make([]byte, len(bytes)) + copy(*m, bytes) + return nil +} + +func (m RawMessage) MarshalYAML() ([]byte, error) { + return []byte(m), nil +} + +func (m *RawMessage) UnmarshalYAML(node *yaml.Node) error { + var value interface{} + if err := node.Decode(&value); err != nil { + return err + } + bytes, err := jsonMarshal(value) + if err != nil { + return err + } + *m = bytes + return nil +} + +//go:embed pulumi.json +var metaSchema []byte + +var MetaSchema *jsonschema.Schema + +func init() { + compiler := jsonschema.NewCompiler() + compiler.LoadURL = func(u string) (io.ReadCloser, error) { + if u == "blob://pulumi.json" { + return 
io.NopCloser(bytes.NewReader(metaSchema)), nil + } + return jsonschema.LoadURL(u) + } + MetaSchema = compiler.MustCompile("blob://pulumi.json") +} + // TypeSpec is the serializable form of a reference to a type. type TypeSpec struct { // Type is the primitive or composite type, if any. May be "bool", "integer", "number", "string", "array", or // "object". - Type string `json:"type,omitempty"` + Type string `json:"type,omitempty" yaml:"type,omitempty"` // Ref is a reference to a type in this or another document. For example, the built-in Archive, Asset, and Any // types are referenced as "pulumi.json#/Archive", "pulumi.json#/Asset", and "pulumi.json#/Any", respectively. // A type from this document is referenced as "#/types/pulumi:type:token". @@ -649,146 +1326,157 @@ type TypeSpec struct { // A resource from this document is referenced as "#/resources/pulumi:type:token". // A resource from another document is referenced as "path#/resources/pulumi:type:token", where path is of the form: // "/provider/vX.Y.Z/schema.json" or "pulumi.json" or "http[s]://example.com/provider/vX.Y.Z/schema.json" - Ref string `json:"$ref,omitempty"` + Ref string `json:"$ref,omitempty" yaml:"$ref,omitempty"` // AdditionalProperties, if set, describes the element type of an "object" (i.e. a string -> value map). - AdditionalProperties *TypeSpec `json:"additionalProperties,omitempty"` + AdditionalProperties *TypeSpec `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"` // Items, if set, describes the element type of an array. - Items *TypeSpec `json:"items,omitempty"` + Items *TypeSpec `json:"items,omitempty" yaml:"items,omitempty"` // OneOf indicates that values of the type may be one of any of the listed types. - OneOf []TypeSpec `json:"oneOf,omitempty"` + OneOf []TypeSpec `json:"oneOf,omitempty" yaml:"oneOf,omitempty"` // Discriminator informs the consumer of an alternative schema based on the value associated with it. 
- Discriminator *DiscriminatorSpec `json:"discriminator,omitempty"` + Discriminator *DiscriminatorSpec `json:"discriminator,omitempty" yaml:"discriminator,omitempty"` + // Plain indicates that when used as an input, this type does not accept eventual values. + Plain bool `json:"plain,omitempty" yaml:"plain,omitempty"` } // DiscriminatorSpec informs the consumer of an alternative schema based on the value associated with it. type DiscriminatorSpec struct { // PropertyName is the name of the property in the payload that will hold the discriminator value. - PropertyName string `json:"propertyName"` + PropertyName string `json:"propertyName" yaml:"propertyName"` // Mapping is an optional object to hold mappings between payload values and schema names or references. - Mapping map[string]string `json:"mapping,omitempty"` + Mapping map[string]string `json:"mapping,omitempty" yaml:"mapping,omitempty"` } // DefaultSpec is the serializable form of extra information about the default value for a property. type DefaultSpec struct { // Environment specifies a set of environment variables to probe for a default value. - Environment []string `json:"environment,omitempty"` + Environment []string `json:"environment,omitempty" yaml:"environment,omitempty"` // Language specifies additional language-specific data about the default value. - Language map[string]json.RawMessage `json:"language,omitempty"` + Language map[string]RawMessage `json:"language,omitempty" yaml:"language,omitempty"` } // PropertySpec is the serializable form of an object or resource property. type PropertySpec struct { - TypeSpec + TypeSpec `yaml:",inline"` // Description is the description of the property, if any. - Description string `json:"description,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` // Const is the constant value for the property, if any. The type of the value must be assignable to the type of // the property. 
- Const interface{} `json:"const,omitempty"` + Const interface{} `json:"const,omitempty" yaml:"const,omitempty"` // Default is the default value for the property, if any. The type of the value must be assignable to the type of // the property. - Default interface{} `json:"default,omitempty"` + Default interface{} `json:"default,omitempty" yaml:"default,omitempty"` // DefaultInfo contains additional information about the property's default value, if any. - DefaultInfo *DefaultSpec `json:"defaultInfo,omitempty"` + DefaultInfo *DefaultSpec `json:"defaultInfo,omitempty" yaml:"defaultInfo,omitempty"` // DeprecationMessage indicates whether or not the property is deprecated. - DeprecationMessage string `json:"deprecationMessage,omitempty"` + DeprecationMessage string `json:"deprecationMessage,omitempty" yaml:"deprecationMessage,omitempty"` // Language specifies additional language-specific data about the property. - Language map[string]json.RawMessage `json:"language,omitempty"` + Language map[string]RawMessage `json:"language,omitempty" yaml:"language,omitempty"` // Secret specifies if the property is secret (default false). - Secret bool `json:"secret,omitempty"` + Secret bool `json:"secret,omitempty" yaml:"secret,omitempty"` + // ReplaceOnChanges specifies if the property is to be replaced instead of updated (default false). + ReplaceOnChanges bool `json:"replaceOnChanges,omitempty" yaml:"replaceOnChanges,omitempty"` } // ObjectTypeSpec is the serializable form of an object type. type ObjectTypeSpec struct { // Description is the description of the type, if any. - Description string `json:"description,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` // Properties, if present, is a map from property name to PropertySpec that describes the type's properties. 
- Properties map[string]PropertySpec `json:"properties,omitempty"` + Properties map[string]PropertySpec `json:"properties,omitempty" yaml:"properties,omitempty"` // Type must be "object" if this is an object type, or the underlying type for an enum. - Type string `json:"type,omitempty"` + Type string `json:"type,omitempty" yaml:"type,omitempty"` // Required, if present, is a list of the names of an object type's required properties. These properties must be set // for inputs and will always be set for outputs. - Required []string `json:"required,omitempty"` - // Plain, if present, is a list of the names of an object type's plain properties. These properties only accept - // prompt values. - Plain []string `json:"plain,omitempty"` + Required []string `json:"required,omitempty" yaml:"required,omitempty"` + // Plain, was a list of the names of an object type's plain properties. This property is ignored: instead, property + // types should be marked as plain where necessary. + Plain []string `json:"plain,omitempty" yaml:"plain,omitempty"` // Language specifies additional language-specific data about the type. - Language map[string]json.RawMessage `json:"language,omitempty"` + Language map[string]RawMessage `json:"language,omitempty" yaml:"language,omitempty"` + // IsOverlay indicates whether the type is an overlay provided by the package. Overlay code is generated by the + // package rather than using the core Pulumi codegen libraries. + IsOverlay bool `json:"isOverlay,omitempty" yaml:"isOverlay,omitempty"` } // ComplexTypeSpec is the serializable form of an object or enum type. type ComplexTypeSpec struct { - ObjectTypeSpec + ObjectTypeSpec `yaml:",inline"` // Enum, if present, is the list of possible values for an enum type. - Enum []*EnumValueSpec `json:"enum,omitempty"` + Enum []EnumValueSpec `json:"enum,omitempty" yaml:"enum,omitempty"` } // EnumValuesSpec is the serializable form of the values metadata associated with an enum type. 
type EnumValueSpec struct { // Name, if present, overrides the name of the enum value that would usually be derived from the value. - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` // Description of the enum value. - Description string `json:"description,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` // Value is the enum value itself. - Value interface{} `json:"value"` + Value interface{} `json:"value" yaml:"value"` // DeprecationMessage indicates whether or not the value is deprecated. - DeprecationMessage string `json:"deprecationMessage,omitempty"` + DeprecationMessage string `json:"deprecationMessage,omitempty" yaml:"deprecationMessage,omitempty"` } // AliasSpec is the serializable form of an alias description. type AliasSpec struct { // Name is the name portion of the alias, if any. - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty" yaml:"name,omitempty"` // Project is the project portion of the alias, if any. - Project *string `json:"project,omitempty"` + Project *string `json:"project,omitempty" yaml:"project,omitempty"` // Type is the type portion of the alias, if any. - Type *string `json:"type,omitempty"` + Type *string `json:"type,omitempty" yaml:"type,omitempty"` } // ResourceSpec is the serializable form of a resource description. type ResourceSpec struct { - ObjectTypeSpec + ObjectTypeSpec `yaml:",inline"` // InputProperties is a map from property name to PropertySpec that describes the resource's input properties. - InputProperties map[string]PropertySpec `json:"inputProperties,omitempty"` + InputProperties map[string]PropertySpec `json:"inputProperties,omitempty" yaml:"inputProperties,omitempty"` // RequiredInputs is a list of the names of the resource's required input properties. 
- RequiredInputs []string `json:"requiredInputs,omitempty"` - // PlainInputs is a list of the names of the resource's plain input properties that only accept prompt values. - PlainInputs []string `json:"plainInputs,omitempty"` + RequiredInputs []string `json:"requiredInputs,omitempty" yaml:"requiredInputs,omitempty"` + // PlainInputs was a list of the names of the resource's plain input properties. This property is ignored: + // instead, property types should be marked as plain where necessary. + PlainInputs []string `json:"plainInputs,omitempty" yaml:"plainInputs,omitempty"` // StateInputs is an optional ObjectTypeSpec that describes additional inputs that mau be necessary to get an // existing resource. If this is unset, only an ID is necessary. - StateInputs *ObjectTypeSpec `json:"stateInputs,omitempty"` + StateInputs *ObjectTypeSpec `json:"stateInputs,omitempty" yaml:"stateInputs,omitempty"` // Aliases is the list of aliases for the resource. - Aliases []AliasSpec `json:"aliases,omitempty"` + Aliases []AliasSpec `json:"aliases,omitempty" yaml:"aliases,omitempty"` // DeprecationMessage indicates whether or not the resource is deprecated. - DeprecationMessage string `json:"deprecationMessage,omitempty"` - // Language specifies additional language-specific data about the resource. - Language map[string]json.RawMessage `json:"language,omitempty"` + DeprecationMessage string `json:"deprecationMessage,omitempty" yaml:"deprecationMessage,omitempty"` // IsComponent indicates whether the resource is a ComponentResource. - IsComponent bool `json:"isComponent,omitempty"` + IsComponent bool `json:"isComponent,omitempty" yaml:"isComponent,omitempty"` + // Methods maps method names to functions in this schema. + Methods map[string]string `json:"methods,omitempty" yaml:"methods,omitempty"` } // FunctionSpec is the serializable form of a function description. type FunctionSpec struct { // Description is the description of the function, if any. 
- Description string `json:"description,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` // Inputs is the bag of input values for the function, if any. - Inputs *ObjectTypeSpec `json:"inputs,omitempty"` + Inputs *ObjectTypeSpec `json:"inputs,omitempty" yaml:"inputs,omitempty"` // Outputs is the bag of output values for the function, if any. - Outputs *ObjectTypeSpec `json:"outputs,omitempty"` + Outputs *ObjectTypeSpec `json:"outputs,omitempty" yaml:"outputs,omitempty"` // DeprecationMessage indicates whether or not the function is deprecated. - DeprecationMessage string `json:"deprecationMessage,omitempty"` + DeprecationMessage string `json:"deprecationMessage,omitempty" yaml:"deprecationMessage,omitempty"` // Language specifies additional language-specific data about the function. - Language map[string]json.RawMessage `json:"language,omitempty"` + Language map[string]RawMessage `json:"language,omitempty" yaml:"language,omitempty"` + // IsOverlay indicates whether the function is an overlay provided by the package. Overlay code is generated by the + // package rather than using the core Pulumi codegen libraries. + IsOverlay bool `json:"isOverlay,omitempty" yaml:"isOverlay,omitempty"` } // ConfigSpec is the serializable description of a package's configuration variables. type ConfigSpec struct { // Variables is a map from variable name to PropertySpec that describes a package's configuration variables. - Variables map[string]PropertySpec `json:"variables,omitempty"` + Variables map[string]PropertySpec `json:"variables,omitempty" yaml:"variables,omitempty"` // Required is a list of the names of the package's required configuration variables. - Required []string `json:"defaults,omitempty"` + Required []string `json:"defaults,omitempty" yaml:"defaults,omitempty"` } // MetadataSpec contains information for the importer about this package. @@ -797,62 +1485,160 @@ type MetadataSpec struct { // type token. 
Packages that use the module format "namespace1/namespace2/.../namespaceN" do not need to specify // a format. The regex must define one capturing group that contains the module name, which must be formatted as // "namespace1/namespace2/...namespaceN". - ModuleFormat string `json:"moduleFormat,omitempty"` + ModuleFormat string `json:"moduleFormat,omitempty" yaml:"moduleFormat,omitempty"` } // PackageSpec is the serializable description of a Pulumi package. type PackageSpec struct { // Name is the unqualified name of the package (e.g. "aws", "azure", "gcp", "kubernetes", "random") - Name string `json:"name"` + Name string `json:"name" yaml:"name"` + // DisplayName is the human-friendly name of the package. + DisplayName string `json:"displayName,omitempty" yaml:"displayName,omitempty"` // Version is the version of the package. The version must be valid semver. - Version string `json:"version,omitempty"` + Version string `json:"version,omitempty" yaml:"version,omitempty"` // Description is the description of the package. - Description string `json:"description,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` // Keywords is the list of keywords that are associated with the package, if any. - Keywords []string `json:"keywords,omitempty"` + // Some reserved keywords can be specified as well that help with categorizing the + // package in the Pulumi registry. `category/` and `kind/` are the only + // reserved keywords at this time, where `` can be one of: + // `cloud`, `database`, `infrastructure`, `monitoring`, `network`, `utility`, `vcs` + // and `` is either `native` or `component`. If the package is a bridged Terraform + // provider, then don't include the `kind/` label. + Keywords []string `json:"keywords,omitempty" yaml:"keywords,omitempty"` // Homepage is the package's homepage. 
- Homepage string `json:"homepage,omitempty"` + Homepage string `json:"homepage,omitempty" yaml:"homepage,omitempty"` // License indicates which license is used for the package's contents. - License string `json:"license,omitempty"` + License string `json:"license,omitempty" yaml:"license,omitempty"` // Attribution allows freeform text attribution of derived work, if needed. - Attribution string `json:"attribution,omitempty"` + Attribution string `json:"attribution,omitempty" yaml:"attribution,omitempty"` // Repository is the URL at which the source for the package can be found. - Repository string `json:"repository,omitempty"` + Repository string `json:"repository,omitempty" yaml:"repository,omitempty"` // LogoURL is the URL for the package's logo, if any. - LogoURL string `json:"logoUrl,omitempty"` + LogoURL string `json:"logoUrl,omitempty" yaml:"logoUrl,omitempty"` // PluginDownloadURL is the URL to use to acquire the provider plugin binary, if any. - PluginDownloadURL string `json:"pluginDownloadURL,omitempty"` + PluginDownloadURL string `json:"pluginDownloadURL,omitempty" yaml:"pluginDownloadURL,omitempty"` + // Publisher is the name of the person or organization that authored and published the package. + Publisher string `json:"publisher,omitempty" yaml:"publisher,omitempty"` // Meta contains information for the importer about this package. - Meta *MetadataSpec `json:"meta,omitempty"` + Meta *MetadataSpec `json:"meta,omitempty" yaml:"meta,omitempty"` + + // A list of allowed package name in addition to the Name property. + AllowedPackageNames []string `json:"allowedPackageNames,omitempty" yaml:"allowedPackageNames,omitempty"` // Config describes the set of configuration variables defined by this package. - Config ConfigSpec `json:"config"` + Config ConfigSpec `json:"config" yaml:"config"` // Types is a map from type token to ComplexTypeSpec that describes the set of complex types (ie. object, enum) // defined by this package. 
- Types map[string]ComplexTypeSpec `json:"types,omitempty"` + Types map[string]ComplexTypeSpec `json:"types,omitempty" yaml:"types,omitempty"` // Provider describes the provider type for this package. - Provider ResourceSpec `json:"provider"` + Provider ResourceSpec `json:"provider" yaml:"provider"` // Resources is a map from type token to ResourceSpec that describes the set of resources defined by this package. - Resources map[string]ResourceSpec `json:"resources,omitempty"` + Resources map[string]ResourceSpec `json:"resources,omitempty" yaml:"resources,omitempty"` // Functions is a map from token to FunctionSpec that describes the set of functions defined by this package. - Functions map[string]FunctionSpec `json:"functions,omitempty"` + Functions map[string]FunctionSpec `json:"functions,omitempty" yaml:"functions,omitempty"` // Language specifies additional language-specific data about the package. - Language map[string]json.RawMessage `json:"language,omitempty"` + Language map[string]RawMessage `json:"language,omitempty" yaml:"language,omitempty"` +} + +var defaultModuleFormat = regexp.MustCompile("(.*)") + +func memberPath(section, token string, rest ...string) string { + path := fmt.Sprintf("#/%v/%v", section, url.PathEscape(token)) + if len(rest) != 0 { + path += "/" + strings.Join(rest, "/") + } + return path } -// importSpec converts a serializable PackageSpec into a Package. This function includes a loader parameter which +func errorf(path, message string, args ...interface{}) *hcl.Diagnostic { + contract.Require(path != "", "path") + + summary := path + ": " + fmt.Sprintf(message, args...) 
+ return &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: summary, + } +} + +func validateSpec(spec PackageSpec) (hcl.Diagnostics, error) { + bytes, err := json.Marshal(spec) + if err != nil { + return nil, err + } + var raw interface{} + if err = json.Unmarshal(bytes, &raw); err != nil { + return nil, err + } + + if err = MetaSchema.Validate(raw); err == nil { + return nil, nil + } + validationError, ok := err.(*jsonschema.ValidationError) + if !ok { + return nil, err + } + + var diags hcl.Diagnostics + var appendError func(err *jsonschema.ValidationError) + appendError = func(err *jsonschema.ValidationError) { + if err.InstanceLocation != "" && err.Message != "" { + diags = diags.Append(errorf("#"+err.InstanceLocation, "%v", err.Message)) + } + for _, err := range err.Causes { + appendError(err) + } + } + appendError(validationError) + + return diags, nil +} + +// bindSpec converts a serializable PackageSpec into a Package. This function includes a loader parameter which // works as a singleton -- if it is nil, a new loader is instantiated, else the provided loader is used. This avoids // breaking downstream consumers of ImportSpec while allowing us to extend schema support to external packages. -func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) (*Package, error) { +// +// A few notes on diagnostics and errors in spec binding: +// +// - Unless an error is *fatal*--i.e. binding is fundamentally unable to proceed (e.g. because a provider for a package +// failed to load)--errors should be communicated as diagnostics. Fatal errors should be communicated as error values. +// - Semantic errors during type binding should not be fatal. Instead, they should return an `InvalidType`. The invalid +// type is accepted in any position, and carries diagnostics that explain the semantic error during binding. This +// allows binding to continue and produce as much information as possible for the end user. 
+// - Diagnostics may be rendered to users by downstream tools, and should be written with schema authors in mind. +// - Diagnostics _must_ contain enough contextual information for a user to be able to understand the source of the +// diagnostic. Until we have line/column information, we use JSON pointers to the offending entities. These pointers +// are passed around using `path` parameters. The `errorf` function is provided as a utility to easily create a +// diagnostic error that is appropriately tagged with a JSON pointer. +// +func bindSpec(spec PackageSpec, languages map[string]Language, loader Loader, + validate bool) (*Package, hcl.Diagnostics, error) { + var diags hcl.Diagnostics + + // Validate the package against the metaschema. + if validate { + validationDiags, err := validateSpec(spec) + if err != nil { + return nil, nil, fmt.Errorf("validating spec: %w", err) + } + diags = diags.Extend(validationDiags) + } + + // Validate that there is a name + if spec.Name == "" { + diags = diags.Append(errorf("#/name", "no name provided")) + } + // Parse the version, if any. var version *semver.Version if spec.Version != "" { v, err := semver.ParseTolerant(spec.Version) if err != nil { - return nil, errors.Wrap(err, "parsing package version") + diags = diags.Append(errorf("#/version", "failed to parse semver: %v", err)) + } else { + version = &v } - version = &v } // Parse the module format, if any. 
@@ -862,9 +1648,11 @@ func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) } moduleFormatRegexp, err := regexp.Compile(moduleFormat) if err != nil { - return nil, errors.Wrap(err, "compiling module format regexp") + diags = diags.Append(errorf("#/meta/moduleFormat", "failed to compile regex: %v", err)) } + diags = diags.Extend(spec.validateTypeTokens()) + pkg := &Package{} // We want to use the same loader instance for all referenced packages, so only instantiate the loader if the @@ -872,41 +1660,46 @@ func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) if loader == nil { cwd, err := os.Getwd() if err != nil { - return nil, err + return nil, nil, err } ctx, err := plugin.NewContext(nil, nil, nil, nil, cwd, nil, false, nil) if err != nil { - return nil, err + return nil, nil, err } defer contract.IgnoreClose(ctx) loader = NewPluginLoader(ctx.Host) } - types, err := bindTypes(pkg, spec.Types, loader) + types, typeDiags, err := bindTypes(pkg, spec.Types, loader) if err != nil { - return nil, errors.Wrap(err, "binding types") + return nil, nil, err } + diags = diags.Extend(typeDiags) - config, err := bindConfig(spec.Config, types) + config, configDiags, err := bindConfig(spec.Config, types) if err != nil { - return nil, errors.Wrap(err, "binding config") + return nil, nil, err } + diags = diags.Extend(configDiags) - provider, err := bindProvider(spec.Name, spec.Provider, types) + functions, functionTable, functionDiags, err := bindFunctions(spec.Functions, types) if err != nil { - return nil, errors.Wrap(err, "binding provider") + return nil, nil, err } + diags = diags.Extend(functionDiags) - resources, resourceTable, err := bindResources(spec.Resources, types) + provider, providerDiags, err := bindProvider(spec.Name, spec.Provider, types, functionTable) if err != nil { - return nil, errors.Wrap(err, "binding resources") + return nil, nil, err } + diags = diags.Extend(providerDiags) - functions, functionTable, err 
:= bindFunctions(spec.Functions, types) + resources, resourceTable, resourceDiags, err := bindResources(spec.Resources, types, functionTable) if err != nil { - return nil, errors.Wrap(err, "binding functions") + return nil, nil, err } + diags = diags.Extend(resourceDiags) // Build the type list. var typeList []Type @@ -914,7 +1707,9 @@ func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) typeList = append(typeList, t) } for _, t := range types.objects { + // t is a plain shape: add it and its corresponding input shape to the type list. typeList = append(typeList, t) + typeList = append(typeList, t.InputShape) } for _, t := range types.arrays { typeList = append(typeList, t) @@ -938,12 +1733,13 @@ func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) language := make(map[string]interface{}) for name, raw := range spec.Language { - language[name] = raw + language[name] = json.RawMessage(raw) } *pkg = Package{ moduleFormat: moduleFormatRegexp, Name: spec.Name, + DisplayName: spec.DisplayName, Version: version, Description: spec.Description, Keywords: spec.Keywords, @@ -952,6 +1748,7 @@ func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) Attribution: spec.Attribution, Repository: spec.Repository, PluginDownloadURL: spec.PluginDownloadURL, + Publisher: spec.Publisher, Config: config, Types: typeList, Provider: provider, @@ -964,16 +1761,32 @@ func importSpec(spec PackageSpec, languages map[string]Language, loader Loader) resourceTypeTable: types.resources, } if err := pkg.ImportLanguages(languages); err != nil { - return nil, err + return nil, nil, err } - return pkg, nil + return pkg, diags, nil } -// ImportSpec converts a serializable PackageSpec into a Package. +// BindSpec converts a serializable PackageSpec into a Package. Any semantic errors encountered during binding are +// contained in the returned diagnostics. The returned error is only non-nil if a fatal error was encountered. 
+func BindSpec(spec PackageSpec, languages map[string]Language) (*Package, hcl.Diagnostics, error) { + return bindSpec(spec, languages, nil, true) +} + +// ImportSpec converts a serializable PackageSpec into a Package. Unlike BindSpec, ImportSpec does not validate its +// input against the Pulumi package metaschema. ImportSpec should only be used to load packages that are assumed to be +// well-formed (e.g. packages referenced for program code generation or by a root package being used for SDK +// generation). BindSpec should be used to load and validate a package spec prior to generating its SDKs. func ImportSpec(spec PackageSpec, languages map[string]Language) (*Package, error) { // Call the internal implementation that includes a loader parameter. - return importSpec(spec, languages, nil) + pkg, diags, err := bindSpec(spec, languages, nil, false) + if err != nil { + return nil, err + } + if diags.HasErrors() { + return nil, diags + } + return pkg, nil } // types facilitates interning (only storing a single reference to an object) during schema processing. The fields @@ -990,9 +1803,11 @@ type types struct { tokens map[string]*TokenType enums map[string]*EnumType named map[string]Type // objects and enums + inputs map[Type]*InputType + optionals map[Type]*OptionalType } -func (t *types) bindPrimitiveType(name string) (Type, error) { +func (t *types) bindPrimitiveType(path, name string) (Type, hcl.Diagnostics) { switch name { case "boolean": return BoolType, nil @@ -1003,7 +1818,7 @@ func (t *types) bindPrimitiveType(name string) (Type, error) { case "string": return StringType, nil default: - return nil, errors.Errorf("unknown primitive type %v", name) + return invalidType(errorf(path, "unknown primitive type %v", name)) } } @@ -1024,13 +1839,49 @@ const ( providerRef = "provider" ) +// Validate an individual name token. 
+func (spec *PackageSpec) validateTypeToken(allowedPackageNames map[string]bool, section, token string) hcl.Diagnostics { + diags := hcl.Diagnostics{} + + path := memberPath(section, token) + var packageName string + if i := strings.Index(token, ":"); i != -1 { + packageName = token[:i] + } + if !allowedPackageNames[packageName] { + error := errorf(path, "invalid token '%s' (must have package name '%s')", token, spec.Name) + diags = diags.Append(error) + } + + return diags +} + +// This is for validating non-reference type tokens. +func (spec *PackageSpec) validateTypeTokens() hcl.Diagnostics { + diags := hcl.Diagnostics{} + allowedPackageNames := map[string]bool{spec.Name: true} + for _, prefix := range spec.AllowedPackageNames { + allowedPackageNames[prefix] = true + } + for t := range spec.Resources { + diags = diags.Extend(spec.validateTypeToken(allowedPackageNames, "resources", t)) + } + for t := range spec.Types { + diags = diags.Extend(spec.validateTypeToken(allowedPackageNames, "types", t)) + } + for t := range spec.Functions { + diags = diags.Extend(spec.validateTypeToken(allowedPackageNames, "functions", t)) + } + return diags +} + // Regex used to parse external schema paths. This is declared at the package scope to avoid repeated recompilation. -var refPathRegex = regexp.MustCompile(`^/?(?P\w+)/(?Pv[^/]*)/schema\.json$`) +var refPathRegex = regexp.MustCompile(`^/?(?P[-\w]+)/(?Pv[^/]*)/schema\.json$`) -func (t *types) parseTypeSpecRef(ref string) (typeSpecRef, error) { +func (t *types) parseTypeSpecRef(refPath, ref string) (typeSpecRef, hcl.Diagnostics) { parsedURL, err := url.Parse(ref) if err != nil { - return typeSpecRef{}, errors.Wrapf(err, "failed to parse ref URL: %s", ref) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "failed to parse ref URL '%s': %v", ref, err)} } // Parse the package name and version if the URL contains a path. 
If there is no path--if the URL is just a @@ -1039,18 +1890,18 @@ func (t *types) parseTypeSpecRef(ref string) (typeSpecRef, error) { if len(parsedURL.Path) > 0 { path, err := url.PathUnescape(parsedURL.Path) if err != nil { - return typeSpecRef{}, errors.Wrapf(err, "failed to unescape path: %s", parsedURL.Path) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "failed to unescape path '%s': %v", parsedURL.Path, err)} } pathMatch := refPathRegex.FindStringSubmatch(path) if len(pathMatch) != 3 { - return typeSpecRef{}, fmt.Errorf("failed to parse path: %s", path) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "failed to parse path '%s'", path)} } pkg, versionToken := pathMatch[1], pathMatch[2] version, err := semver.ParseTolerant(versionToken) if err != nil { - return typeSpecRef{}, errors.Wrapf(err, "failed to parse package version: %s", versionToken) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "failed to parse package version '%s': %v", versionToken, err)} } pkgName, pkgVersion = pkg, &version @@ -1078,16 +1929,16 @@ func (t *types) parseTypeSpecRef(ref string) (typeSpecRef, error) { switch kind { case "provider": if token != "" { - return typeSpecRef{}, fmt.Errorf("invalid provider reference '%v'", ref) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "invalid provider reference '%v'", ref)} } token = "pulumi:providers:" + pkgName case "resources", "types": token, err = url.PathUnescape(token) if err != nil { - return typeSpecRef{}, errors.Wrapf(err, "failed to unescape token: %s", token) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "failed to unescape token '%s': %v", token, err)} } default: - return typeSpecRef{}, fmt.Errorf("invalid type reference '%v'", ref) + return typeSpecRef{}, hcl.Diagnostics{errorf(refPath, "invalid type reference '%v'", ref)} } return typeSpecRef{ @@ -1107,22 +1958,83 @@ func versionEquals(a, b *semver.Version) bool { return a.Equals(*b) } -func (t *types) bindTypeSpecRef(spec TypeSpec) 
(Type, error) { +func (t *types) newInputType(elementType Type) Type { + if _, ok := elementType.(*InputType); ok { + return elementType + } + + typ, ok := t.inputs[elementType] + if !ok { + typ = &InputType{ElementType: elementType} + t.inputs[elementType] = typ + } + return typ +} + +func (t *types) newOptionalType(elementType Type) Type { + if _, ok := elementType.(*OptionalType); ok { + return elementType + } + typ, ok := t.optionals[elementType] + if !ok { + typ = &OptionalType{ElementType: elementType} + t.optionals[elementType] = typ + } + return typ +} + +func (t *types) newMapType(elementType Type) Type { + typ, ok := t.maps[elementType] + if !ok { + typ = &MapType{ElementType: elementType} + t.maps[elementType] = typ + } + return typ +} + +func (t *types) newArrayType(elementType Type) Type { + typ, ok := t.arrays[elementType] + if !ok { + typ = &ArrayType{ElementType: elementType} + t.arrays[elementType] = typ + } + return typ +} + +func (t *types) newUnionType( + elements []Type, defaultType Type, discriminator string, mapping map[string]string) *UnionType { + union := &UnionType{ + ElementTypes: elements, + DefaultType: defaultType, + Discriminator: discriminator, + Mapping: mapping, + } + if typ, ok := t.unions[union.String()]; ok { + return typ + } + t.unions[union.String()] = union + return union +} + +func (t *types) bindTypeSpecRef(path string, spec TypeSpec, inputShape bool) (Type, hcl.Diagnostics, error) { + path = path + "/$ref" + // Explicitly handle built-in types so that we don't have to handle this type of path during ref parsing. 
switch spec.Ref { case "pulumi.json#/Archive": - return ArchiveType, nil + return ArchiveType, nil, nil case "pulumi.json#/Asset": - return AssetType, nil + return AssetType, nil, nil case "pulumi.json#/Json": - return JSONType, nil + return JSONType, nil, nil case "pulumi.json#/Any": - return AnyType, nil + return AnyType, nil, nil } - ref, err := t.parseTypeSpecRef(spec.Ref) - if err != nil { - return nil, err + ref, refDiags := t.parseTypeSpecRef(path, spec.Ref) + if refDiags.HasErrors() { + typ, _ := invalidType(refDiags...) + return typ, refDiags, nil } // If this is a reference to an external sch @@ -1130,324 +2042,355 @@ func (t *types) bindTypeSpecRef(spec TypeSpec) (Type, error) { if referencesExternalSchema { pkg, err := t.loader.LoadPackage(ref.Package, ref.Version) if err != nil { - return nil, errors.Wrapf(err, "resolving package %v", ref.URL) + return nil, nil, fmt.Errorf("resolving package %v: %w", ref.URL, err) } switch ref.Kind { case typesRef: typ, ok := pkg.GetType(ref.Token) if !ok { - return nil, fmt.Errorf("type %v not found in package %v", ref.Token, ref.Package) + typ, diags := invalidType(errorf(path, "type %v not found in package %v", ref.Token, ref.Package)) + return typ, diags, nil } - return typ, nil + if obj, ok := typ.(*ObjectType); ok && inputShape { + typ = obj.InputShape + } + return typ, nil, nil case resourcesRef, providerRef: typ, ok := pkg.GetResourceType(ref.Token) if !ok { - return nil, fmt.Errorf("resource type %v not found in package %v", ref.Token, ref.Package) + typ, diags := invalidType(errorf(path, "resource type %v not found in package %v", ref.Token, ref.Package)) + return typ, diags, nil } - return typ, nil + return typ, nil, nil } } switch ref.Kind { case typesRef: if typ, ok := t.objects[ref.Token]; ok { - return typ, nil + if inputShape { + return typ.InputShape, nil, nil + } + return typ, nil, nil } if typ, ok := t.enums[ref.Token]; ok { - return typ, nil + return typ, nil, nil } + + var diags hcl.Diagnostics 
typ, ok := t.tokens[ref.Token] if !ok { typ = &TokenType{Token: ref.Token} if spec.Type != "" { - ut, err := t.bindType(TypeSpec{Type: spec.Type}) - if err != nil { - return nil, err - } + ut, primDiags := t.bindPrimitiveType(path, spec.Type) + diags = diags.Extend(primDiags) + typ.UnderlyingType = ut } t.tokens[ref.Token] = typ } - return typ, nil + return typ, diags, nil case resourcesRef, providerRef: typ, ok := t.resources[ref.Token] if !ok { typ = &ResourceType{Token: ref.Token} t.resources[ref.Token] = typ } - return typ, nil + return typ, nil, nil default: - return nil, errors.Errorf("failed to parse ref %s", spec.Ref) + typ, diags := invalidType(errorf(path, "failed to parse ref %s", spec.Ref)) + return typ, diags, nil } } -func (t *types) bindType(spec TypeSpec) (Type, error) { +func (t *types) bindType(path string, spec TypeSpec, inputShape bool) (result Type, diags hcl.Diagnostics, err error) { + if inputShape && !spec.Plain { + defer func() { + result = t.newInputType(result) + }() + } + if spec.Ref != "" { - return t.bindTypeSpecRef(spec) + return t.bindTypeSpecRef(path, spec, inputShape) } if spec.OneOf != nil { if len(spec.OneOf) < 2 { - return nil, errors.New("oneOf should list at least two types") + diags = diags.Append(errorf(path+"/oneOf", "oneOf should list at least two types")) } var defaultType Type if spec.Type != "" { - dt, err := t.bindPrimitiveType(spec.Type) - if err != nil { - return nil, err - } + dt, primDiags := t.bindPrimitiveType(path+"/type", spec.Type) + diags = diags.Extend(primDiags) + defaultType = dt } elements := make([]Type, len(spec.OneOf)) for i, spec := range spec.OneOf { - e, err := t.bindType(spec) + e, typDiags, err := t.bindType(fmt.Sprintf("%s/oneOf/%v", path, i), spec, inputShape) + diags = diags.Extend(typDiags) + if err != nil { - return nil, err + return nil, diags, err } + elements[i] = e } var discriminator string var mapping map[string]string if spec.Discriminator != nil { + if spec.Discriminator.PropertyName 
== "" { + diags = diags.Append(errorf(path, "discriminator must provide a property name")) + } discriminator = spec.Discriminator.PropertyName mapping = spec.Discriminator.Mapping } - union := &UnionType{ - ElementTypes: elements, - DefaultType: defaultType, - Discriminator: discriminator, - Mapping: mapping, - } - if typ, ok := t.unions[union.String()]; ok { - return typ, nil - } - t.unions[union.String()] = union - return union, nil + return t.newUnionType(elements, defaultType, discriminator, mapping), diags, nil } // nolint: goconst switch spec.Type { case "boolean", "integer", "number", "string": - return t.bindPrimitiveType(spec.Type) + typ, typDiags := t.bindPrimitiveType(path+"/type", spec.Type) + diags = diags.Extend(typDiags) + + return typ, diags, nil case "array": if spec.Items == nil { - return nil, errors.Errorf("missing \"items\" property in type spec") + diags = diags.Append(errorf(path, "missing \"items\" property in array type spec")) + typ, _ := invalidType(diags...) + return typ, diags, nil } - elementType, err := t.bindType(*spec.Items) + elementType, elementDiags, err := t.bindType(path+"/items", *spec.Items, inputShape) + diags = diags.Extend(elementDiags) if err != nil { - return nil, err + return nil, diags, err } - typ, ok := t.arrays[elementType] - if !ok { - typ = &ArrayType{ElementType: elementType} - t.arrays[elementType] = typ - } - return typ, nil + return t.newArrayType(elementType), diags, nil case "object": - elementType := StringType + elementType, elementDiags, err := t.bindType(path, TypeSpec{Type: "string"}, inputShape) + contract.Assert(len(elementDiags) == 0) + contract.Assert(err == nil) + if spec.AdditionalProperties != nil { - et, err := t.bindType(*spec.AdditionalProperties) + et, elementDiags, err := t.bindType(path+"/additionalProperties", *spec.AdditionalProperties, inputShape) + diags = diags.Extend(elementDiags) if err != nil { - return nil, err + return nil, diags, err } + elementType = et } - typ, ok := 
t.maps[elementType] - if !ok { - typ = &MapType{ElementType: elementType} - t.maps[elementType] = typ - } - return typ, nil + return t.newMapType(elementType), diags, nil default: - return nil, errors.Errorf("unknown type kind %v", spec.Type) + diags = diags.Append(errorf(path+"/type", "unknown type kind %v", spec.Type)) + typ, _ := invalidType(diags...) + return typ, diags, nil + } +} + +func plainType(typ Type) Type { + for { + switch t := typ.(type) { + case *InputType: + typ = t.ElementType + case *OptionalType: + typ = t.ElementType + case *ObjectType: + if t.PlainShape == nil { + return t + } + typ = t.PlainShape + default: + return t + } } } -func bindConstValue(value interface{}, typ Type) (interface{}, error) { +func bindConstValue(path, kind string, value interface{}, typ Type) (interface{}, hcl.Diagnostics) { if value == nil { return nil, nil } - switch typ { + typeError := func(expectedType string) hcl.Diagnostics { + return hcl.Diagnostics{errorf(path, "invalid constant of type %T for %v %v", value, expectedType, kind)} + } + + switch typ = plainType(typ); typ { case BoolType: if _, ok := value.(bool); !ok { - return nil, errors.Errorf("invalid constant of type %T for boolean property", value) + return false, typeError("boolean") } case IntType: v, ok := value.(float64) if !ok { - return nil, errors.Errorf("invalid constant of type %T for integer property", value) + return 0, typeError("integer") } if math.Trunc(v) != v || v < math.MinInt32 || v > math.MaxInt32 { - return nil, errors.Errorf("invalid constant of type number for integer property") + return 0, typeError("integer") } value = int32(v) case NumberType: if _, ok := value.(float64); !ok { - return nil, errors.Errorf("invalid constant of type %T for number property", value) + return 0.0, typeError("number") } case StringType: if _, ok := value.(string); !ok { - return nil, errors.Errorf("invalid constant of type %T for string property", value) + return 0.0, typeError("string") } default: - 
return nil, errors.Errorf("constant values may only be provided for boolean, integer, number, and string properties") + if _, isInvalid := typ.(*InvalidType); isInvalid { + return nil, nil + } + return nil, hcl.Diagnostics{errorf(path, "type %v cannot have a constant value; only booleans, integers, "+ + "numbers and strings may have constant values", typ)} } return value, nil } -func bindDefaultValue(value interface{}, spec *DefaultSpec, typ Type) (*DefaultValue, error) { +func bindDefaultValue(path string, value interface{}, spec *DefaultSpec, typ Type) (*DefaultValue, hcl.Diagnostics) { if value == nil && spec == nil { return nil, nil } + var diags hcl.Diagnostics if value != nil { + typ = plainType(typ) switch typ := typ.(type) { case *UnionType: if typ.DefaultType != nil { - return bindDefaultValue(value, spec, typ.DefaultType) + return bindDefaultValue(path, value, spec, typ.DefaultType) } for _, elementType := range typ.ElementTypes { - v, err := bindDefaultValue(value, spec, elementType) - if err == nil { - return v, nil + v, diags := bindDefaultValue(path, value, spec, elementType) + if !diags.HasErrors() { + return v, diags } } case *EnumType: - return bindDefaultValue(value, spec, typ.ElementType) + return bindDefaultValue(path, value, spec, typ.ElementType) } - switch typ { - case BoolType: - if _, ok := value.(bool); !ok { - return nil, errors.Errorf("invalid default of type %T for boolean property", value) - } - case IntType: - v, ok := value.(float64) - if !ok { - return nil, errors.Errorf("invalid default of type %T for integer property", value) - } - if math.Trunc(v) != v || v < math.MinInt32 || v > math.MaxInt32 { - return nil, errors.Errorf("invalid default of type number for integer property") - } - value = int32(v) - case NumberType: - if _, ok := value.(float64); !ok { - return nil, errors.Errorf("invalid default of type %T for number property", value) - } - case StringType: - if _, ok := value.(string); !ok { - return nil, 
errors.Errorf("invalid default of type %T for string property", value) - } - default: - return nil, errors.Errorf("default values may only be provided for boolean, integer, number, and string properties") - } + v, valueDiags := bindConstValue(path, "default", value, typ) + diags = diags.Extend(valueDiags) + value = v } dv := &DefaultValue{Value: value} if spec != nil { language := make(map[string]interface{}) for name, raw := range spec.Language { - language[name] = raw + language[name] = json.RawMessage(raw) + } + if len(spec.Environment) == 0 { + diags = diags.Append(errorf(path, "Default must specify an environment")) } dv.Environment, dv.Language = spec.Environment, language } - return dv, nil + return dv, diags } // bindProperties binds the map of property specs and list of required properties into a sorted list of properties and // a lookup table. -func (t *types) bindProperties(properties map[string]PropertySpec, required []string, - plain []string) ([]*Property, map[string]*Property, error) { +func (t *types) bindProperties(path string, properties map[string]PropertySpec, requiredPath string, required []string, + inputShape bool) ([]*Property, map[string]*Property, hcl.Diagnostics, error) { + + var diags hcl.Diagnostics // Bind property types and constant or default values. 
propertyMap := map[string]*Property{} var result []*Property for name, spec := range properties { - typ, err := t.bindType(spec.TypeSpec) - if err != nil { - return nil, nil, errors.Wrapf(err, "error binding type for property %q", name) - } + propertyPath := path + "/" + name - cv, err := bindConstValue(spec.Const, typ) + typ, typDiags, err := t.bindType(propertyPath, spec.TypeSpec, inputShape) + diags = diags.Extend(typDiags) if err != nil { - return nil, nil, errors.Wrapf(err, "error binding constant value for property %q", name) + return nil, nil, diags, fmt.Errorf("error binding type for property %q: %w", name, err) } - dv, err := bindDefaultValue(spec.Default, spec.DefaultInfo, typ) - if err != nil { - return nil, nil, errors.Wrapf(err, "error binding default value for property %q", name) - } + cv, cvDiags := bindConstValue(propertyPath+"/const", "constant", spec.Const, typ) + diags = diags.Extend(cvDiags) + + dv, dvDiags := bindDefaultValue(propertyPath+"/default", spec.Default, spec.DefaultInfo, typ) + diags = diags.Extend(dvDiags) language := make(map[string]interface{}) for name, raw := range spec.Language { - language[name] = raw + language[name] = json.RawMessage(raw) } p := &Property{ Name: name, Comment: spec.Description, - Type: typ, + Type: t.newOptionalType(typ), ConstValue: cv, DefaultValue: dv, DeprecationMessage: spec.DeprecationMessage, Language: language, Secret: spec.Secret, + ReplaceOnChanges: spec.ReplaceOnChanges, } propertyMap[name], result = p, append(result, p) } // Compute required properties. - for _, name := range required { + for i, name := range required { p, ok := propertyMap[name] if !ok { - return nil, nil, errors.Errorf("unknown required property %q", name) + diags = diags.Append(errorf(fmt.Sprintf("%s/%v", requiredPath, i), "unknown required property %q", name)) + continue } - p.IsRequired = true - } - - // Compute plain properties. 
- for _, name := range plain { - p, ok := propertyMap[name] - if !ok { - return nil, nil, errors.Errorf("unknown plain property %q", name) + if typ, ok := p.Type.(*OptionalType); ok { + p.Type = typ.ElementType } - p.IsPlain = true } sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name }) - return result, propertyMap, nil + return result, propertyMap, diags, nil } -func (t *types) bindObjectTypeDetails(obj *ObjectType, token string, spec ObjectTypeSpec, - supportsPlainProperties bool) error { +func (t *types) bindObjectTypeDetails(path string, obj *ObjectType, token string, + spec ObjectTypeSpec) (hcl.Diagnostics, error) { - if !supportsPlainProperties && len(spec.Plain) > 0 { - return errors.New("plain cannot be specified") + var diags hcl.Diagnostics + + if len(spec.Plain) > 0 { + diags = diags.Append(errorf(path+"/plain", + "plain has been removed; the property type must be marked as plain instead")) } - properties, propertyMap, err := t.bindProperties(spec.Properties, spec.Required, spec.Plain) + properties, propertyMap, propertiesDiags, err := t.bindProperties(path+"/properties", spec.Properties, + path+"/required", spec.Required, false) + diags = diags.Extend(propertiesDiags) if err != nil { - return err + return diags, err + } + + inputProperties, inputPropertyMap, inputPropertiesDiags, err := t.bindProperties( + path+"/properties", spec.Properties, path+"/required", spec.Required, true) + diags = diags.Extend(inputPropertiesDiags) + if err != nil { + return diags, err } language := make(map[string]interface{}) for name, raw := range spec.Language { - language[name] = raw + language[name] = json.RawMessage(raw) } obj.Package = t.pkg @@ -1456,15 +2399,28 @@ func (t *types) bindObjectTypeDetails(obj *ObjectType, token string, spec Object obj.Language = language obj.Properties = properties obj.properties = propertyMap - return nil + obj.IsOverlay = spec.IsOverlay + + obj.InputShape.Package = t.pkg + obj.InputShape.Token = token + 
obj.InputShape.Comment = spec.Description + obj.InputShape.Language = language + obj.InputShape.Properties = inputProperties + obj.InputShape.properties = inputPropertyMap + + return diags, nil } -func (t *types) bindObjectType(token string, spec ObjectTypeSpec, supportsPlainProperties bool) (*ObjectType, error) { +func (t *types) bindObjectType(path, token string, spec ObjectTypeSpec) (*ObjectType, hcl.Diagnostics, error) { obj := &ObjectType{} - if err := t.bindObjectTypeDetails(obj, token, spec, supportsPlainProperties); err != nil { - return nil, err + obj.InputShape = &ObjectType{PlainShape: obj} + obj.IsOverlay = spec.IsOverlay + + diags, err := t.bindObjectTypeDetails(path, obj, token, spec) + if err != nil { + return nil, diags, err } - return obj, nil + return obj, diags, nil } func (t *types) bindResourceTypeDetails(obj *ResourceType, token string) error { @@ -1480,66 +2436,52 @@ func (t *types) bindResourceType(token string) (*ResourceType, error) { return r, nil } -func (t *types) bindEnumTypeDetails(enum *EnumType, token string, spec ComplexTypeSpec) error { - typ, err := t.bindType(TypeSpec{Type: spec.Type}) - if err != nil { - return err - } +func (t *types) bindEnumTypeDetails(enum *EnumType, token string, spec ComplexTypeSpec) hcl.Diagnostics { + var diags hcl.Diagnostics - values, err := t.bindEnumValues(spec.Enum, typ) - if err != nil { - return err + typ, typDiags := t.bindPrimitiveType(memberPath("types", token, "type"), spec.Type) + diags = diags.Extend(typDiags) + + switch typ { + case StringType, IntType, NumberType, BoolType: + // OK + default: + if _, isInvalid := typ.(*InvalidType); !isInvalid { + diags = diags.Append(errorf(memberPath("types", token, "type"), + "enums may only be of type string, integer, number or boolean")) + } } + values, valuesDiags := t.bindEnumValues(memberPath("types", token, "enum"), spec.Enum, typ) + diags = diags.Extend(valuesDiags) + + enum.Package = t.pkg enum.Token = token enum.Elements = values 
enum.ElementType = typ enum.Comment = spec.Description + enum.IsOverlay = spec.IsOverlay - return nil + return diags } -func (t *types) bindEnumValues(values []*EnumValueSpec, typ Type) ([]*Enum, error) { +func (t *types) bindEnumValues(path string, values []EnumValueSpec, typ Type) ([]*Enum, hcl.Diagnostics) { var enums []*Enum + var diags hcl.Diagnostics + + for i, spec := range values { + value, valueDiags := bindConstValue(fmt.Sprintf("%s/%v/value", path, i), "enum", spec.Value, typ) + diags = diags.Extend(valueDiags) - errorMessage := func(val interface{}, expectedType string) error { - return fmt.Errorf("cannot assign enum value of type '%T' to enum of type '%s'", val, expectedType) - } - for _, spec := range values { - switch typ { - case StringType: - if _, ok := spec.Value.(string); !ok { - return nil, errorMessage(spec.Value, typ.String()) - } - case IntType: - v, ok := spec.Value.(float64) - if !ok { - return nil, errorMessage(spec.Value, typ.String()) - } - if math.Trunc(v) != v || v < math.MinInt32 || v > math.MaxInt32 { - return nil, errors.Errorf("cannot assign enum value of type 'number' to enum of type 'integer'") - } - spec.Value = int32(v) - case NumberType: - if _, ok := spec.Value.(float64); !ok { - return nil, errorMessage(spec.Value, typ.String()) - } - case BoolType: - if _, ok := spec.Value.(bool); !ok { - return nil, errorMessage(spec.Value, typ.String()) - } - default: - return nil, fmt.Errorf("enum values may only be of string, integer, number or boolean types") - } enum := &Enum{ - Value: spec.Value, + Value: value, Comment: spec.Description, Name: spec.Name, DeprecationMessage: spec.DeprecationMessage, } enums = append(enums, enum) } - return enums, nil + return enums, diags } func (t *types) bindEnumType(token string, spec ComplexTypeSpec) (*EnumType, error) { @@ -1550,7 +2492,8 @@ func (t *types) bindEnumType(token string, spec ComplexTypeSpec) (*EnumType, err return enum, nil } -func bindTypes(pkg *Package, complexTypes 
map[string]ComplexTypeSpec, loader Loader) (*types, error) { +func bindTypes(pkg *Package, complexTypes map[string]ComplexTypeSpec, loader Loader) (*types, hcl.Diagnostics, error) { + var diags hcl.Diagnostics typs := &types{ pkg: pkg, @@ -1563,6 +2506,8 @@ func bindTypes(pkg *Package, complexTypes map[string]ComplexTypeSpec, loader Loa tokens: map[string]*TokenType{}, enums: map[string]*EnumType{}, named: map[string]Type{}, + inputs: map[Type]*InputType{}, + optionals: map[Type]*OptionalType{}, } // Declare object and enum types before processing properties. @@ -1573,6 +2518,7 @@ func bindTypes(pkg *Package, complexTypes map[string]ComplexTypeSpec, loader Loa // object type is its token. While this doesn't affect object types directly, it breaks the interning of types // that reference object types (e.g. arrays, maps, unions) typ := &ObjectType{Token: token} + typ.InputShape = &ObjectType{Token: token, PlainShape: typ, IsOverlay: spec.IsOverlay} typs.objects[token] = typ typs.named[token] = typ } else if len(spec.Enum) > 0 { @@ -1581,9 +2527,8 @@ func bindTypes(pkg *Package, complexTypes map[string]ComplexTypeSpec, loader Loa typs.named[token] = typ // Bind enums before object types because object type generation depends on enum values to be present. - if err := typs.bindEnumTypeDetails(typs.enums[token], token, spec); err != nil { - return nil, errors.Wrapf(err, "failed to bind type %s", token) - } + enumDiags := typs.bindEnumTypeDetails(typs.enums[token], token, spec) + diags = diags.Extend(enumDiags) } } @@ -1595,46 +2540,114 @@ func bindTypes(pkg *Package, complexTypes map[string]ComplexTypeSpec, loader Loa // Process object types. 
for token, spec := range complexTypes { if spec.Type == "object" { - if err := typs.bindObjectTypeDetails( - typs.objects[token], token, spec.ObjectTypeSpec, true /*supportsPlainProperties*/); err != nil { - return nil, errors.Wrapf(err, "failed to bind type %s", token) + path := memberPath("types", token) + objDiags, err := typs.bindObjectTypeDetails(path, typs.objects[token], token, spec.ObjectTypeSpec) + diags = diags.Extend(objDiags) + + if err != nil { + return nil, diags, fmt.Errorf("failed to bind type %s: %w", token, err) } } } - return typs, nil + return typs, diags, nil } -func bindConfig(spec ConfigSpec, types *types) ([]*Property, error) { - properties, _, err := types.bindProperties(spec.Variables, spec.Required, nil) - return properties, err +func bindMethods(path, resourceToken string, methods map[string]string, + functionTable map[string]*Function) ([]*Method, hcl.Diagnostics) { + + var diags hcl.Diagnostics + + names := make([]string, 0, len(methods)) + for name := range methods { + names = append(names, name) + } + sort.Strings(names) + + result := make([]*Method, 0, len(methods)) + for _, name := range names { + token := methods[name] + + methodPath := path + "/" + name + + function, ok := functionTable[token] + if !ok { + diags = diags.Append(errorf(methodPath, "unknown function %s", token)) + continue + } + if function.IsMethod { + diags = diags.Append(errorf(methodPath, "function %s is already a method", token)) + continue + } + idx := strings.LastIndex(function.Token, "/") + if idx == -1 || function.Token[:idx] != resourceToken { + diags = diags.Append(errorf(methodPath, "invalid function token format %s", token)) + continue + } + if function.Inputs == nil || function.Inputs.Properties == nil || len(function.Inputs.Properties) == 0 || + function.Inputs.Properties[0].Name != "__self__" { + diags = diags.Append(errorf(methodPath, "function %s has no __self__ parameter", token)) + continue + } + function.IsMethod = true + result = append(result, 
&Method{ + Name: name, + Function: function, + }) + } + return result, diags } -func bindResource(token string, spec ResourceSpec, types *types) (*Resource, error) { +func bindConfig(spec ConfigSpec, types *types) ([]*Property, hcl.Diagnostics, error) { + properties, _, diags, err := types.bindProperties("#/config/variables", spec.Variables, + "#/config/defaults", spec.Required, false) + return properties, diags, err +} + +func bindResource(path, token string, spec ResourceSpec, types *types, + functionTable map[string]*Function) (*Resource, hcl.Diagnostics, error) { + + var diags hcl.Diagnostics + if len(spec.Plain) > 0 { - return nil, errors.New("plain cannot be specified on resources") + diags = diags.Append(errorf(path+"/plain", "plain has been removed; property types must be marked as plain instead")) } - if len(spec.PlainInputs) > 0 && !spec.IsComponent { - return nil, errors.New("plainInputs can only be specified on component resources") + if len(spec.PlainInputs) > 0 { + diags = diags.Append(errorf(path+"/plainInputs", + "plainInputs has been removed; individual property types must be marked as plain instead")) } - properties, _, err := types.bindProperties(spec.Properties, spec.Required, nil) + properties, _, propertyDiags, err := types.bindProperties(path+"/properties", spec.Properties, + path+"/required", spec.Required, false) + diags = diags.Extend(propertyDiags) if err != nil { - return nil, errors.Wrap(err, "failed to bind properties") + return nil, diags, fmt.Errorf("failed to bind properties for %v: %w", token, err) } - inputProperties, _, err := types.bindProperties(spec.InputProperties, spec.RequiredInputs, spec.PlainInputs) + inputProperties, _, inputDiags, err := types.bindProperties(path+"/inputProperties", spec.InputProperties, + path+"/requiredInputs", spec.RequiredInputs, true) + diags = diags.Extend(inputDiags) if err != nil { - return nil, errors.Wrap(err, "failed to bind properties") + return nil, diags, fmt.Errorf("failed to bind input 
properties for %v: %w", token, err) + } + + methods, methodDiags := bindMethods(path+"/methods", token, spec.Methods, functionTable) + diags = diags.Extend(methodDiags) + + for _, method := range methods { + if _, ok := spec.Properties[method.Name]; ok { + diags = diags.Append(errorf(path+"/methods/"+method.Name, "%v already has a property named %s", token, method.Name)) + } } var stateInputs *ObjectType if spec.StateInputs != nil { - si, err := types.bindObjectType(token+"Args", *spec.StateInputs, false /*supportsPlainProperties*/) + si, stateDiags, err := types.bindObjectType(path+"/stateInputs", token+"Args", *spec.StateInputs) + diags = diags.Extend(stateDiags) if err != nil { - return nil, errors.Wrap(err, "error binding inputs") + return nil, diags, fmt.Errorf("error binding inputs for %v: %w", token, err) } - stateInputs = si + stateInputs = si.InputShape } var aliases []*Alias @@ -1644,7 +2657,7 @@ func bindResource(token string, spec ResourceSpec, types *types) (*Resource, err language := make(map[string]interface{}) for name, raw := range spec.Language { - language[name] = raw + language[name] = json.RawMessage(raw) } return &Resource{ @@ -1658,44 +2671,61 @@ func bindResource(token string, spec ResourceSpec, types *types) (*Resource, err DeprecationMessage: spec.DeprecationMessage, Language: language, IsComponent: spec.IsComponent, - }, nil + Methods: methods, + IsOverlay: spec.IsOverlay, + }, diags, nil } -func bindProvider(pkgName string, spec ResourceSpec, types *types) (*Resource, error) { - res, err := bindResource("pulumi:providers:"+pkgName, spec, types) +func bindProvider(pkgName string, spec ResourceSpec, types *types, + functionTable map[string]*Function) (*Resource, hcl.Diagnostics, error) { + + res, diags, err := bindResource("#/provider", "pulumi:providers:"+pkgName, spec, types, functionTable) if err != nil { - return nil, errors.Wrap(err, "error binding provider") + return nil, diags, fmt.Errorf("error binding provider: %w", err) } 
res.IsProvider = true // Since non-primitive provider configuration is currently JSON serialized, we can't handle it without // modifying the path by which it's looked up. As a temporary workaround to enable access to config which // values which are primitives, we'll simply remove any properties for the provider resource which are not - // here, before we generate the provider code. - var primitiveProperties []*Property + // strings, or types with an underlying type of string, before we generate the provider code. + var stringProperties []*Property for _, prop := range res.Properties { - if prop.Type != stringType { - continue + typ := plainType(prop.Type) + if tokenType, isTokenType := typ.(*TokenType); isTokenType { + if tokenType.UnderlyingType != stringType { + continue + } + } else { + if typ != stringType { + continue + } } - primitiveProperties = append(primitiveProperties, prop) + + stringProperties = append(stringProperties, prop) } - res.Properties = primitiveProperties + res.Properties = stringProperties types.resources[res.Token] = &ResourceType{ Token: res.Token, Resource: res, } - return res, nil + return res, diags, nil } -func bindResources(specs map[string]ResourceSpec, types *types) ([]*Resource, map[string]*Resource, error) { +func bindResources(specs map[string]ResourceSpec, types *types, + functionTable map[string]*Function) ([]*Resource, map[string]*Resource, hcl.Diagnostics, error) { + + var diags hcl.Diagnostics + resourceTable := map[string]*Resource{} var resources []*Resource for token, spec := range specs { - res, err := bindResource(token, spec, types) + res, resDiags, err := bindResource(memberPath("resources", token), token, spec, types, functionTable) + diags = diags.Extend(resDiags) if err != nil { - return nil, nil, errors.Wrapf(err, "error binding resource %v", token) + return nil, nil, diags, fmt.Errorf("error binding resource %v: %w", token, err) } resourceTable[token] = res @@ -1717,31 +2747,37 @@ func bindResources(specs 
map[string]ResourceSpec, types *types) ([]*Resource, ma return resources[i].Token < resources[j].Token }) - return resources, resourceTable, nil + return resources, resourceTable, diags, nil } -func bindFunction(token string, spec FunctionSpec, types *types) (*Function, error) { +func bindFunction(token string, spec FunctionSpec, types *types) (*Function, hcl.Diagnostics, error) { + var diags hcl.Diagnostics + + path := memberPath("functions", token) + var inputs *ObjectType if spec.Inputs != nil { - ins, err := types.bindObjectType(token+"Args", *spec.Inputs, false /*supportsPlainProperties*/) + ins, inDiags, err := types.bindObjectType(path+"/inputs", token+"Args", *spec.Inputs) + diags = diags.Extend(inDiags) if err != nil { - return nil, errors.Wrap(err, "error binding inputs") + return nil, diags, fmt.Errorf("error binding inputs for function %v: %w", token, err) } inputs = ins } var outputs *ObjectType if spec.Outputs != nil { - outs, err := types.bindObjectType(token+"Result", *spec.Outputs, false /*supportsPlainProperties*/) + outs, outDiags, err := types.bindObjectType(path+"/outputs", token+"Result", *spec.Outputs) + diags = diags.Extend(outDiags) if err != nil { - return nil, errors.Wrap(err, "error binding inputs") + return nil, diags, fmt.Errorf("error binding outputs for function %v: %w", token, err) } outputs = outs } language := make(map[string]interface{}) for name, raw := range spec.Language { - language[name] = raw + language[name] = json.RawMessage(raw) } return &Function{ @@ -1752,16 +2788,22 @@ func bindFunction(token string, spec FunctionSpec, types *types) (*Function, err Outputs: outputs, DeprecationMessage: spec.DeprecationMessage, Language: language, - }, nil + IsOverlay: spec.IsOverlay, + }, diags, nil } -func bindFunctions(specs map[string]FunctionSpec, types *types) ([]*Function, map[string]*Function, error) { +func bindFunctions(specs map[string]FunctionSpec, + types *types) ([]*Function, map[string]*Function, hcl.Diagnostics, error) 
{ + + var diags hcl.Diagnostics + functionTable := map[string]*Function{} var functions []*Function for token, spec := range specs { - f, err := bindFunction(token, spec, types) + f, fdiags, err := bindFunction(token, spec, types) + diags = diags.Extend(fdiags) if err != nil { - return nil, nil, errors.Wrapf(err, "error binding function %v", token) + return nil, nil, diags, fmt.Errorf("error binding function %v: %w", token, err) } functionTable[token] = f functions = append(functions, f) @@ -1771,5 +2813,42 @@ func bindFunctions(specs map[string]FunctionSpec, types *types) ([]*Function, ma return functions[i].Token < functions[j].Token }) - return functions, functionTable, nil + return functions, functionTable, diags, nil +} + +func jsonMarshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + enc := json.NewEncoder(&b) + enc.SetEscapeHTML(false) + enc.SetIndent("", " ") + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Determines if codegen should emit a ${fn}Output version that +// automatically accepts Inputs and returns Outputs. +func (fun *Function) NeedsOutputVersion() bool { + + // Skip functions that return no value. Arguably we could + // support them and return `Task`, but there are no such + // functions in `pulumi-azure-native` or `pulumi-aws` so we + // omit to simplify. + if fun.Outputs == nil { + return false + } + + // Skip functions that have no inputs. The user can simply + // lift the `Task` to `Output` manually. + if fun.Inputs == nil { + return false + } + + // No properties is kind of like no inputs. 
+ if len(fun.Inputs.Properties) == 0 { + return false + } + + return true } diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities.go index 1001388..4ddb793 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities.go @@ -51,6 +51,11 @@ func (ss StringSet) Has(s string) bool { return ok } +// StringSet.Except returns the string set setminus s. +func (ss StringSet) Except(s string) StringSet { + return ss.Subtract(NewStringSet(s)) +} + func (ss StringSet) SortedValues() []string { values := make([]string, 0, len(ss)) for v := range ss { @@ -60,12 +65,38 @@ func (ss StringSet) SortedValues() []string { return values } +// Contains returns true if all elements of the subset are also present in the current set. It also returns true +// if subset is empty. +func (ss StringSet) Contains(subset StringSet) bool { + for v := range subset { + if !ss.Has(v) { + return false + } + } + return true +} + +// Subtract returns a new string set with all elements of the current set that are not present in the other set. 
+func (ss StringSet) Subtract(other StringSet) StringSet { + result := NewStringSet() + for v := range ss { + if !other.Has(v) { + result.Add(v) + } + } + return result +} + type Set map[interface{}]struct{} func (s Set) Add(v interface{}) { s[v] = struct{}{} } +func (s Set) Delete(v interface{}) { + delete(s, v) +} + func (s Set) Has(v interface{}) bool { _, ok := s[v] return ok diff --git a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities_types.go b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities_types.go index 2f047b0..7cc3aea 100644 --- a/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities_types.go +++ b/vendor/github.com/pulumi/pulumi/pkg/v3/codegen/utilities_types.go @@ -1,15 +1,10 @@ package codegen -import "github.com/pulumi/pulumi/pkg/v3/codegen/schema" +import ( + "github.com/pulumi/pulumi/pkg/v3/codegen/schema" +) -type Type struct { - schema.Type - - Plain bool - Optional bool -} - -func visitTypeClosure(t Type, visitor func(t Type), seen Set) { +func visitTypeClosure(t schema.Type, visitor func(t schema.Type), seen Set) { if seen.Has(t) { return } @@ -17,30 +12,176 @@ func visitTypeClosure(t Type, visitor func(t Type), seen Set) { visitor(t) - switch st := t.Type.(type) { + switch st := t.(type) { case *schema.ArrayType: - visitTypeClosure(Type{st.ElementType, t.Plain, t.Optional}, visitor, seen) + visitTypeClosure(st.ElementType, visitor, seen) case *schema.MapType: - visitTypeClosure(Type{st.ElementType, t.Plain, t.Optional}, visitor, seen) + visitTypeClosure(st.ElementType, visitor, seen) case *schema.ObjectType: - visitPropertyTypeClosure(t, st.Properties, visitor, seen) + for _, p := range st.Properties { + visitTypeClosure(p.Type, visitor, seen) + } case *schema.UnionType: for _, e := range st.ElementTypes { - visitTypeClosure(Type{e, t.Plain, t.Optional}, visitor, seen) + visitTypeClosure(e, visitor, seen) } + case *schema.InputType: + visitTypeClosure(st.ElementType, visitor, seen) + case *schema.OptionalType: + 
visitTypeClosure(st.ElementType, visitor, seen) } } -func visitPropertyTypeClosure(root Type, properties []*schema.Property, visitor func(t Type), seen Set) { +func VisitTypeClosure(properties []*schema.Property, visitor func(t schema.Type)) { + seen := Set{} for _, p := range properties { - visitTypeClosure(Type{ - Type: p.Type, - Plain: root.Plain || p.IsPlain, - Optional: !p.IsRequired, - }, visitor, seen) + visitTypeClosure(p.Type, visitor, seen) + } +} + +func SimplifyInputUnion(t schema.Type) schema.Type { + union, ok := t.(*schema.UnionType) + if !ok { + return t + } + + elements := make([]schema.Type, len(union.ElementTypes)) + for i, et := range union.ElementTypes { + if input, ok := et.(*schema.InputType); ok { + elements[i] = input.ElementType + } else { + elements[i] = et + } + } + return &schema.UnionType{ + ElementTypes: elements, + DefaultType: union.DefaultType, + Discriminator: union.Discriminator, + Mapping: union.Mapping, + } +} + +// RequiredType unwraps the OptionalType enclosing the Property's type, if any. +func RequiredType(p *schema.Property) schema.Type { + if optional, ok := p.Type.(*schema.OptionalType); ok { + return optional.ElementType + } + return p.Type +} + +// OptionalType wraps the Property's type in an OptionalType if it is not already optional. +func OptionalType(p *schema.Property) schema.Type { + if optional, ok := p.Type.(*schema.OptionalType); ok { + return optional + } + return &schema.OptionalType{ElementType: p.Type} +} + +// UnwrapType removes any outer OptionalTypes and InputTypes from t. 
+func UnwrapType(t schema.Type) schema.Type { + for { + switch typ := t.(type) { + case *schema.InputType: + t = typ.ElementType + case *schema.OptionalType: + t = typ.ElementType + default: + return t + } + } +} + +func IsNOptionalInput(t schema.Type) bool { + for { + switch typ := t.(type) { + case *schema.InputType: + return true + case *schema.OptionalType: + t = typ.ElementType + default: + return false + } } } -func VisitTypeClosure(properties []*schema.Property, visitor func(t Type)) { - visitPropertyTypeClosure(Type{}, properties, visitor, Set{}) +func resolvedType(t schema.Type, plainObjects bool) schema.Type { + switch typ := t.(type) { + case *schema.InputType: + return resolvedType(typ.ElementType, plainObjects) + case *schema.OptionalType: + e := resolvedType(typ.ElementType, plainObjects) + if e == typ.ElementType { + return typ + } + return &schema.OptionalType{ElementType: e} + case *schema.ArrayType: + e := resolvedType(typ.ElementType, plainObjects) + if e == typ.ElementType { + return typ + } + return &schema.ArrayType{ElementType: e} + case *schema.MapType: + e := resolvedType(typ.ElementType, plainObjects) + if e == typ.ElementType { + return typ + } + return &schema.MapType{ElementType: e} + case *schema.ObjectType: + if !plainObjects || !typ.IsInputShape() { + return typ + } + return typ.PlainShape + case *schema.UnionType: + elems, changed := make([]schema.Type, len(typ.ElementTypes)), false + for i, e := range typ.ElementTypes { + elems[i] = resolvedType(e, plainObjects) + changed = changed || elems[i] != e + } + if !changed { + return typ + } + return &schema.UnionType{ + ElementTypes: elems, + DefaultType: typ.DefaultType, + Discriminator: typ.Discriminator, + Mapping: typ.Mapping, + } + default: + return t + } +} + +// PlainType deeply removes any InputTypes from t, with the exception of argument structs. Use ResolvedType to +// unwrap argument structs as well. 
+func PlainType(t schema.Type) schema.Type { + return resolvedType(t, false) +} + +// ResolvedType deeply removes any InputTypes from t. +func ResolvedType(t schema.Type) schema.Type { + return resolvedType(t, true) +} + +// If a helper function needs to be invoked to provide default values for a +// plain type. The provided map cannot be reused. +func IsProvideDefaultsFuncRequired(t schema.Type) bool { + return isProvideDefaultsFuncRequiredHelper(t, map[string]bool{}) +} + +func isProvideDefaultsFuncRequiredHelper(t schema.Type, seen map[string]bool) bool { + if seen[t.String()] { + return false + } + seen[t.String()] = true + t = UnwrapType(t) + object, ok := t.(*schema.ObjectType) + if !ok { + return false + } + for _, p := range object.Properties { + if p.DefaultValue != nil || isProvideDefaultsFuncRequiredHelper(p.Type, seen) { + return true + } + } + return false } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/core.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/core.go index 56551a4..1c7f249 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/core.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/core.go @@ -29,6 +29,7 @@ package apitype import ( + _ "embed" // for embedded schemas "encoding/json" "time" @@ -38,6 +39,40 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) +//go:embed deployments.json +var deploymentSchema string + +// DeploymentSchemaID is the $id for the deployment schema. +const DeploymentSchemaID = "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/deployments.json" + +// DeploymentSchema returns a JSON schema that can be used to validate serialized deployments (i.e. `UntypedDeployment` +// objects). +func DeploymentSchema() string { + return deploymentSchema +} + +//go:embed resources.json +var resourceSchema string + +// ResourceSchemaID is the $id for the deployment schema. 
+const ResourceSchemaID = "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/resources.json" + +// ResourceSchema returns a JSON schema that can be used to validate serialized resource values (e.g. `ResourceV3`). +func ResourceSchema() string { + return resourceSchema +} + +//go:embed property-values.json +var propertyValueSchema string + +// PropertyValueSchemaID is the $id for the property value schema. +const PropertyValueSchemaID = "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/property-values.json" + +// PropertyValueSchema returns a JSON schema that can be used to validate serialized property values. +func PropertyValueSchema() string { + return propertyValueSchema +} + const ( // DeploymentSchemaVersionCurrent is the current version of the `Deployment` schema. // Any deployments newer than this version will be rejected. @@ -283,12 +318,14 @@ type ResourceV3 struct { PendingReplacement bool `json:"pendingReplacement,omitempty" yaml:"pendingReplacement,omitempty"` // AdditionalSecretOutputs is a list of outputs that were explicitly marked as secret when the resource was created. AdditionalSecretOutputs []resource.PropertyKey `json:"additionalSecretOutputs,omitempty" yaml:"additionalSecretOutputs,omitempty"` - // Aliases is a list of previous URNs that this resource may have had in previous deployments + // Aliases is a list of previous URNs that this resource may have had in previous deployments. Aliases []resource.URN `json:"aliases,omitempty" yaml:"aliases,omitempty"` - // CustomTimeouts is a configuration block that can be used to control timeouts of CRUD operations + // CustomTimeouts is a configuration block that can be used to control timeouts of CRUD operations. CustomTimeouts *resource.CustomTimeouts `json:"customTimeouts,omitempty" yaml:"customTimeouts,omitempty"` // ImportID is the import input used for imported resources. 
ImportID resource.ID `json:"importID,omitempty" yaml:"importID,omitempty"` + // An auto-incrementing sequence number for each time this resource gets created/replaced (0 means sequence numbers are unknown, -1 means the last replace didn't use a sequence number). + SequenceNumber int `json:"sequenceNumber,omitempty" yaml:"sequenceNumber,omitempty"` } // ManifestV1 captures meta-information about this checkpoint file, such as versions of binaries, etc. diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/deployments.json b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/deployments.json new file mode 100644 index 0000000..8c9caf0 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/deployments.json @@ -0,0 +1,173 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/deployments.json", + "title": "Pulumi Deployment States", + "description": "A schema for Pulumi deployment states.", + "type": "object", + "properties": { + "version": { + "description": "The deployment version.", + "type": "integer" + }, + "deployment": { + "description": "The deployment object.", + "type": "object" + } + }, + "required": ["version", "deployment"], + "additionalProperties": false, + "oneOf": [ + { "$ref": "#/$defs/deploymentV3" }, + { + "title": "Unknown Version", + "description": "Catchall for unknown deployment versions.", + "type": "object", + "properties": { + "version": { + "description": "The deployment version.", + "not": { + "enum": [ 3 ] + } + }, + "deployment": { + "description": "The deployment object.", + "type": "object" + } + } + } + ], + "$defs": { + "deploymentV3": { + "$anchor": "v3", + "title": "Version 3", + "description": "The third version of the deployment state.", + "type": "object", + "properties": { + "version": { + "description": "The deployment version. 
Must be `3`.", + "const": 3 + }, + "deployment": { + "description": "The deployment state.", + "type": "object", + "properties": { + "manifest": { + "description": "Metadata about the deployment.", + "$ref": "#/$defs/manifestV1" + }, + "secrets_providers": { + "description": "Configuration for this stack's secrets provider.", + "$ref": "#/$defs/secretsProviderV1" + }, + "resources": { + "description": "All resources that are part of the stack.", + "type": "array", + "items": { + "$ref": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/resources.json#v3" + } + }, + "pending_operations": { + "description": "Any operations that were pending at the time the deployment finished.", + "type": "array", + "items": { + "$ref": "#/$defs/operationV2" + } + } + }, + "required": ["manifest"], + "additionalProperties": false + } + }, + "required": ["version", "deployment"], + "additionalProperties": false + }, + "manifestV1": { + "title": "Deployment Manifest", + "description": "Captures meta-information about a deployment, such as versions of binaries, etc.", + "type": "object", + "properties": { + "time": { + "description": "The deployment's start time.", + "type": "string", + "format": "date-time" + }, + "magic": { + "description": "A magic number used to validate the manifest's integrity.", + "type": "string" + }, + "version": { + "description": "The version of the Pulumi engine that produced the deployment.", + "type": "string" + }, + "plugins": { + "description": "Information about the plugins used by the deployment.", + "type": "array", + "items": { + "title": "Plugin Info", + "description": "Information about a plugin.", + "type": "object", + "properties": { + "name": { + "description": "The plugin's name.", + "type": "string" + }, + "path": { + "description": "The path of the plugin's binary.", + "type": "string" + }, + "type": { + "description": "The plugin's type.", + "enum": [ + "analyzer", + "language", + "resource" + ] + }, + "version": { + 
"description": "The plugin's version.", + "type": "string" + } + }, + "required": ["name", "path", "type", "version"], + "additionalProperties": false + } + } + }, + "required": ["time", "magic", "version"], + "additionalProperties": false + }, + "secretsProviderV1": { + "title": "Secrets Provider", + "description": "Configuration information for a secrets provider.", + "type": "object", + "properties": { + "type": { + "description": "The secrets provider's type.", + "type": "string" + }, + "state": { + "description": "The secrets provider's state, if any." + } + }, + "required": ["type"], + "additionalProperties": false + }, + "operationV2": { + "title": "Resource Operation V2", + "description": "Version 2 of a resource operation state", + "type": "object", + "properties": { + "resource": { + "description": "The state of the affected resource as of the start of this operation.", + "$ref": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/resources.json#v3" + }, + "type": { + "description": "A string representation of the operation.", + "enum": ["creating", "updating", "deleting", "reading"] + } + }, + "required": ["resource", "type"], + "additionalProperties": false + } + } +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/plan.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/plan.go new file mode 100644 index 0000000..9c37cb5 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/plan.go @@ -0,0 +1,88 @@ +package apitype + +import ( + "encoding/json" + + "github.com/pulumi/pulumi/sdk/v3/go/common/resource" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource/config" + "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" +) + +// PlanDiffV1 is the serializable version of a plan diff. +type PlanDiffV1 struct { + // the resource properties that will be added. + Adds map[string]interface{} `json:"adds,omitempty"` + // the resource properties that will be deleted. 
+ Deletes []string `json:"deletes,omitempty"` + // the resource properties that will be updated. + Updates map[string]interface{} `json:"updates,omitempty"` +} + +// GoalV1 is the serializable version of a resource goal state. +type GoalV1 struct { + // the type of resource. + Type tokens.Type `json:"type"` + // the name for the resource's URN. + Name tokens.QName `json:"name"` + // true if this resource is custom, managed by a plugin. + Custom bool `json:"custom"` + // the resource's checked input properties that we saw during preview. + // TODO(pdg-plan): Temporary for preview release, should be removed for GA + CheckedInputs map[string]interface{} `json:"checkedInputs,omitempty"` + // the resource properties that will be changed. + InputDiff PlanDiffV1 `json:"inputDiff,omitempty"` + // the resource outputs that will be changed. + OutputDiff PlanDiffV1 `json:"outputDiff,omitempty"` + // an optional parent URN for this resource. + Parent resource.URN `json:"parent,omitempty"` + // true to protect this resource from deletion. + Protect bool `json:"protect"` + // dependencies of this resource object. + Dependencies []resource.URN `json:"dependencies,omitempty"` + // the provider to use for this resource. + Provider string `json:"provider,omitempty"` + // the set of dependencies that affect each property. + PropertyDependencies map[resource.PropertyKey][]resource.URN `json:"propertyDependencies,omitempty"` + // true if this resource should be deleted prior to replacement. + DeleteBeforeReplace *bool `json:"deleteBeforeReplace,omitempty"` + // a list of property names to ignore during changes. + IgnoreChanges []string `json:"ignoreChanges,omitempty"` + // outputs that should always be treated as secrets. + AdditionalSecretOutputs []resource.PropertyKey `json:"additionalSecretOutputs,omitempty"` + // additional URNs that should be aliased to this resource. + Aliases []resource.URN `json:"aliases,omitempty"` + // the expected ID of the resource, if any. 
+ ID resource.ID `json:"id,omitempty"` + // an optional config object for resource options + CustomTimeouts resource.CustomTimeouts `json:"customTimeouts,omitempty"` +} + +// ResourcePlanV1 is the serializable version of a resource plan. +type ResourcePlanV1 struct { + // The goal state for the resource. + Goal *GoalV1 `json:"goal,omitempty"` + // The steps to be performed on the resource. + Steps []OpType `json:"steps,omitempty"` + // The proposed outputs for the resource, if any. Purely advisory. + Outputs map[string]interface{} `json:"state"` +} + +// VersionedDeploymentPlan is a version number plus a JSON document. The version number describes what +// version of the DeploymentPlan structure the DeploymentPlan member's JSON document can decode into. +type VersionedDeploymentPlan struct { + Version int `json:"version"` + Plan json.RawMessage `json:"plan"` +} + +// DeploymentPlanV1 is the serializable version of a deployment plan. +type DeploymentPlanV1 struct { + // TODO(pdg-plan): should there be a message here? + + // Manifest contains metadata about this plan. + Manifest ManifestV1 `json:"manifest" yaml:"manifest"` + // The configuration in use during the plan. + Config config.Map `json:"config,omitempty"` + + // The set of resource plans. 
+ ResourcePlans map[resource.URN]ResourcePlanV1 `json:"resourcePlans,omitempty"` +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/property-values.json b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/property-values.json new file mode 100644 index 0000000..17b9c0d --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/property-values.json @@ -0,0 +1,231 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/property-values.json", + "$anchor": "property_value", + "title": "Pulumi Property Value", + "description": "A schema for Pulumi Property values.", + "oneOf": [ + { + "title": "Primitive property values", + "type": ["null", "boolean", "number", "string"], + "not": { "$ref": "#unknown_value" } + }, + { + "$anchor": "unknown_value", + "title": "Unknown property values", + "const": "04da6b54-80e4-46f7-96ec-b56ff0331ba9" + }, + { + "title": "Array property values", + "type": "array", + "items": { + "$ref": "#property_value" + } + }, + { + "title": "Object property values", + "type": "object", + "additionalProperties": { + "$ref": "#property_value" + }, + "$comment": "The properties map below prevents the object schema from matching special objects", + "properties": { + "4dabf18193072939515e22adb298388d": false + } + }, + { + "$anchor": "asset_value", + "title": "Asset property values", + "type": "object", + "properties": { + "4dabf18193072939515e22adb298388d": { + "description": "Asset signature", + "const": "c44067f5952c0a294b673a41bacd8c17" + }, + "hash": { + "description": "The SHA256 hash of the asset's contents.", + "type": "string" + } + }, + "required": ["4dabf18193072939515e22adb298388d"], + "oneOf": [ + { + "title": "Hash-only Asset", + "properties": { + "text": false, + "path": false, + "uri": false + }, + "required": ["hash"] + }, + { + "title": "Literal Asset", + "properties": { + "text": { + "description": "The 
literal contents of the asset.", + "type": "string" + }, + "path": false, + "uri": false + }, + "required": ["text"] + }, + { + "title": "Local File Asset", + "properties": { + "path": { + "description": "The path to a local file that contains the asset's contents.", + "type": "string" + }, + "text": false, + "uri": false + }, + "required": ["path"] + }, + { + "title": "URI File Asset", + "properties": { + "uri": { + "description": "The URI of a file that contains the asset's contents.", + "type": "string", + "format": "uri" + }, + "text": false, + "path": false + }, + "required": ["uri"] + } + ] + }, + { + "$anchor": "archive_value", + "title": "Archive property values", + "type": "object", + "properties": { + "4dabf18193072939515e22adb298388d": { + "description": "Archive signature", + "const": "0def7320c3a5731c473e5ecbe6d01bc7" + }, + "hash": { + "description": "The SHA256 hash of the archive's contents.", + "type": "string" + } + }, + "required": ["4dabf18193072939515e22adb298388d"], + "oneOf": [ + { + "title": "Hash-only Archive", + "properties": { + "assets": false, + "path": false, + "uri": false + }, + "required": ["hash"] + }, + { + "title": "Literal Archive", + "properties": { + "assets": { + "description": "The literal contents of the archive.", + "type": "object", + "additionalProperties": { + "oneOf": [ + { "$ref": "#asset_value" }, + { "$ref": "#archive_value" } + ] + } + }, + "path": false, + "uri": false + }, + "required": ["assets"] + }, + { + "title": "Local File Archive", + "properties": { + "path": { + "description": "The path to a local file that contains the archive's contents.", + "type": "string" + }, + "assets": false, + "uri": false + }, + "required": ["path"] + }, + { + "title": "URI File Archive", + "properties": { + "uri": { + "description": "The URI of a file that contains the archive's contents.", + "type": "string", + "format": "uri" + }, + "assets": false, + "path": false + }, + "required": ["uri"] + } + ] + }, + { + "title": 
"Secret Property Values", + "type": "object", + "properties": { + "4dabf18193072939515e22adb298388d": { + "description": "Secret signature", + "const": "1b47061264138c4ac30d75fd1eb44270" + } + }, + "required": ["4dabf18193072939515e22adb298388d"], + "oneOf": [ + { + "title": "Encrypted Secret", + "type": "object", + "properties": { + "ciphertext": { + "description": "The encrypted, JSON-serialized property value", + "type": "string" + }, + "plaintext": false + }, + "required": ["ciphertext"] + }, + { + "title": "Decrypted Secret", + "type": "object", + "properties": { + "plaintext": { + "description": "The decrypted, JSON-serialized property value", + "type": "string" + }, + "ciphertext": false + }, + "required": ["plaintext"] + } + ] + }, + { + "title": "Resource reference property values", + "type": "object", + "properties": { + "4dabf18193072939515e22adb298388d": { + "description": "Resource reference signature", + "const": "5cf8f73096256a8f31e491e813e4eb8e" + }, + "packageVersion": { + "description": "The package version of the referenced resource.", + "type": "string" + }, + "urn": { + "description": "The URN of the referenced resource.", + "type": "string" + }, + "id": { + "description": "The ID of the referenced resource.", + "type": "string" + } + }, + "required": ["4dabf18193072939515e22adb298388d", "urn"] + } + ] +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/resources.json b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/resources.json new file mode 100644 index 0000000..a71ff96 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/resources.json @@ -0,0 +1,127 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/resources.json", + "title": "Pulumi Resource State", + "description": "Schemas for Pulumi resource states.", + "oneOf": [ + { "$ref": "#v3" } + ], + "$defs": { + "urn": { + "$comment": "It would be 
wonderful to include a regex here for validation, but we generate _many_ invalid URNs in tests for readability + expediency", + "title": "Unique Resource Name (URN)", + "description": "The unique name for a resource in a Pulumi stack.", + "type": "string" + }, + "resourceV3": { + "$anchor": "v3", + "title": "Resource V3", + "description": "Version 3 of a Pulumi resource state.", + "type": "object", + "properties": { + "urn": { + "description": "The resource's unique name.", + "$ref": "#/$defs/urn" + }, + "custom": { + "description": "True when the resource is managed by a plugin.", + "type": "boolean" + }, + "delete": { + "description": "True when the resource should be deleted during the next update.", + "type": "boolean" + }, + "id": { + "description": "The provider-assigned resource ID, if any, for custom resources.", + "type": "string" + }, + "type": { + "description": "The resource's full type token.", + "type": "string" + }, + "inputs": { + "description": "The input properties supplied to the provider.", + "type": "object", + "additionalProperties": { + "$ref": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/property-values.json" + } + }, + "outputs": { + "description": "The output properties returned by the provider after provisioning.", + "type": "object", + "additionalProperties": { + "$ref": "https://github.com/pulumi/pulumi/blob/master/sdk/go/common/apitype/property-values.json" + } + }, + "parent": { + "description": "An optional parent URN if this resource is a child of it.", + "$ref": "#/$defs/urn" + }, + "protect": { + "description": "True when this resource is \"protected\" and may not be deleted.", + "type": "boolean" + }, + "external": { + "description": "True when the lifecycle of this resource is not managed by Pulumi.", + "type": "boolean" + }, + "dependencies": { + "description": "The dependency edges to other resources that this depends on.", + "type": "array", + "items": { + "$ref": "#/$defs/urn" + } + }, + "initErrors": 
{ + "description": "The set of errors encountered in the process of initializing resource (i.e. during create or update).", + "type": "array", + "items": { + "type": "string" + } + }, + "provider": { + "description": "A reference to the provider that is associated with this resource.", + "type": "string" + }, + "propertyDependencies": { + "description": "A map from each input property name to the set of resources that property depends on.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/$defs/urn" + } + } + }, + "pendingReplacement": { + "description": "Tracks delete-before-replace resources that have been deleted but not yet recreated.", + "type": "boolean" + }, + "additionalSecretOutputs": { + "description": "A list of outputs that were explicitly marked as secret when the resource was created.", + "type": "array", + "items": { + "type": "string" + } + }, + "aliases": { + "description": "A list of previous URNs that this resource may have had in previous deployments", + "type": "array", + "items": { + "$ref": "#/$defs/urn" + } + }, + "customTimeouts": { + "description": "A configuration block that can be used to control timeouts of CRUD operations", + "type": "object" + }, + "importID": { + "description": "The import input used for imported resources.", + "type": "string" + } + }, + "additionalProperties": false, + "required": ["urn"] + } + } +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go index 9c2b7d2..0e0df32 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go @@ -33,6 +33,12 @@ type StackSummary struct { // ListStacksResponse returns a set of stack summaries. This call is designed to be inexpensive. 
type ListStacksResponse struct { Stacks []StackSummary `json:"stacks"` + + // ContinuationToken is an opaque value used to mark the end of the all stacks. If non-nil, + // pass it into a subsequent call in order to get the next batch of results. + // + // A value of nil means that all stacks have been returned. + ContinuationToken *string `json:"continuationToken,omitempty"` } // CreateStackRequest defines the request body for creating a new Stack @@ -71,6 +77,23 @@ type DecryptValueResponse struct { Plaintext []byte `json:"plaintext"` } +// Log3rdPartyDecryptionEvent defines the request body for logging a 3rd party secrets provider decryption event. +type Log3rdPartyDecryptionEvent struct { + SecretName string `json:"secretName,omitempty"` + CommandName string `json:"commandName,omitempty"` +} + +// BulkDecryptValueRequest defines the request body for bulk decrypting secret values. +type BulkDecryptValueRequest struct { + Ciphertexts [][]byte `json:"ciphertexts"` +} + +// BulkDecryptValueResponse defines the response body for bulk decrypted secret values. The key in +// the map is the base64 encoding of the ciphertext. +type BulkDecryptValueResponse struct { + Plaintexts map[string][]byte `json:"plaintexts"` +} + // ExportStackResponse defines the response body for exporting a Stack. 
type ExportStackResponse UntypedDeployment diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/colors.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/colors.go index 8cd30b0..c28966c 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/colors.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/colors.go @@ -26,6 +26,8 @@ import ( const colorLeft = "<{%" const colorRight = "%}>" +type Color = string + var disableColorization bool func command(s string) string { @@ -77,7 +79,7 @@ func writeCodes(w io.StringWriter, codes ...string) { contract.IgnoreError(err) } -func writeDirective(w io.StringWriter, c Colorization, directive string) { +func writeDirective(w io.StringWriter, c Colorization, directive Color) { if disableColorization || c == Never { return } @@ -126,6 +128,8 @@ func writeDirective(w io.StringWriter, c Colorization, directive string) { writeCodes(w, "48", "5", "4") case Black: // command("fg 0") // Only use with background colors. writeCodes(w, "38", "5", "0") + default: + contract.Failf("Unrecognized color code: %q", directive) } } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/diag.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/diag.go index 5b8b9dc..1016217 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/diag.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors/diag.go @@ -38,7 +38,7 @@ func (c Colorization) Colorize(v string) string { // Don't touch the string. Output control sequences as is. return v case Always: - // Convert the constrol sequences into appropriate console escapes for the platform we're on. + // Convert the control sequences into appropriate console escapes for the platform we're on. return colorizeText(v, Always, -1) case Never: return colorizeText(v, Never, -1) @@ -53,7 +53,7 @@ func (c Colorization) Colorize(v string) string { // than maxLength. 
This is useful for scenarios where the string has to be printed in a a context // where there is a max allowed width. In these scenarios, we can't just measure the length of the // string as the embedded color tags would count against it, even though they end up with no length -// when actually interpretted by the console. +// when actually interpreted by the console. func TrimColorizedString(v string, maxRuneLength int) string { return colorizeText(v, Raw, maxRuneLength) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/errors.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/errors.go index f5f53b0..ac15903 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/errors.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/errors.go @@ -38,7 +38,7 @@ func GetResourceInvalidError(urn resource.URN) *Diag { } func GetResourcePropertyInvalidValueError(urn resource.URN) *Diag { - return newError(urn, 2003, "%v resource '%v's property '%v' value %v has a problem: %v") + return newError(urn, 2003, "%v resource '%v': property %v value %v has a problem: %v") } func GetPreviewFailedError(urn resource.URN) *Diag { @@ -80,3 +80,7 @@ func GetResourceWillBeDestroyedButWasNotSpecifiedInTargetList(urn resource.URN) return newError(urn, 2014, `Resource '%v' will be destroyed but was not specified in --target list. Either include resource in --target list or pass --target-dependents to proceed.`) } + +func GetDefaultProviderDenied(urn resource.URN) *Diag { + return newError(urn, 2015, `Default provider for '%v' disabled. 
'%v' must use an explicit provider.`) +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/sink.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/sink.go index 24571ad..b413079 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/sink.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/diag/sink.go @@ -197,9 +197,6 @@ func (d *defaultSink) Stringify(sev Severity, diag *Diag, args ...interface{}) ( buffer.WriteString(colors.Reset) buffer.WriteRune('\n') - // TODO[pulumi/pulumi#15]: support Clang-style expressive diagnostics. This would entail, for example, using - // the buffer within the target document, to demonstrate the offending line/column range of code. - // Ensure that any sensitive data we know about is filtered out preemptively. filtered := logging.FilterString(buffer.String()) diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/encoding/marshal.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/encoding/marshal.go index 093ad7b..e0f8bce 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/encoding/marshal.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/encoding/marshal.go @@ -16,6 +16,7 @@ package encoding import ( "encoding/json" + "fmt" "path/filepath" yaml "gopkg.in/yaml.v2" @@ -106,5 +107,14 @@ func (m *yamlMarshaler) Unmarshal(data []byte, v interface{}) error { // IDEA: use a "strict" marshaler, so that we can warn on unrecognized keys (avoiding silly mistakes). We should // set aside an officially sanctioned area in the metadata for extensibility by 3rd parties. 
- return yaml.Unmarshal(data, v) + err := yaml.Unmarshal(data, v) + if err != nil { + // Return type errors directly + if _, ok := err.(*yaml.TypeError); ok { + return err + } + // Other errors will be parse errors due to invalid syntax + return fmt.Errorf("invalid YAML file: %w", err) + } + return nil } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go index add94ea..075df42 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go @@ -36,6 +36,7 @@ type Encrypter interface { // Decrypter decrypts encrypted ciphertext to its plaintext representation. type Decrypter interface { DecryptValue(ciphertext string) (string, error) + BulkDecrypt(ciphertexts []string) (map[string]string, error) } // Crypter can both encrypt and decrypt values. @@ -54,6 +55,14 @@ func (nopCrypter) DecryptValue(ciphertext string) (string, error) { return ciphertext, nil } +func (nopCrypter) BulkDecrypt(ciphertexts []string) (map[string]string, error) { + secretMap := map[string]string{} + for _, c := range ciphertexts { + secretMap[c] = c + } + return secretMap, nil +} + func (nopCrypter) EncryptValue(plaintext string) (string, error) { return plaintext, nil } @@ -84,6 +93,22 @@ func (t *trackingDecrypter) DecryptValue(ciphertext string) (string, error) { return v, nil } +func (t *trackingDecrypter) BulkDecrypt(ciphertexts []string) (map[string]string, error) { + secretMap := map[string]string{} + for _, c := range ciphertexts { + if _, ok := secretMap[c]; ok { + continue + } + v, err := t.decrypter.DecryptValue(c) + if err != nil { + return secretMap, err + } + secretMap[c] = v + t.secureValues = append(t.secureValues, v) + } + return secretMap, nil +} + func (t *trackingDecrypter) SecureValues() []string { return t.secureValues } @@ -100,14 +125,84 @@ func 
NewBlindingDecrypter() Decrypter { type blindingCrypter struct{} -func (b blindingCrypter) DecryptValue(ciphertext string) (string, error) { - return "[secret]", nil +func (b blindingCrypter) DecryptValue(_ string) (string, error) { + return "[secret]", nil //nolint:goconst +} + +func (b blindingCrypter) BulkDecrypt(ciphertexts []string) (map[string]string, error) { + secretMap := map[string]string{} + for _, c := range ciphertexts { + if _, ok := secretMap[c]; ok { + continue + } + secretMap[c] = "[secret]" + } + return secretMap, nil } func (b blindingCrypter) EncryptValue(plaintext string) (string, error) { return "[secret]", nil } +type CachedDecrypter interface { + Decrypter +} + +// cachedDecrypter is a Decrypter that caches decrypted values, so repeated +// decryptions of the same ciphertext reuse the cached plaintext instead of +// calling the underlying decrypter again. +type cachedDecrypter struct { + decrypter Decrypter + cache map[string]string +} + +func NewCachedDecrypter(decrypter Decrypter) CachedDecrypter { + return &cachedDecrypter{decrypter: decrypter} +} + +func (c *cachedDecrypter) BulkDecrypt(ciperTexts []string) (map[string]string, error) { + secretMap, err := c.decrypter.BulkDecrypt(ciperTexts) + if err != nil { + return nil, err + } + + if c.cache == nil { + c.cache = make(map[string]string, len(secretMap)) + } + + // let's loop over the ciphertexts to ensure that when we write to the cache + // an existing entry in the cache doesn't get updated with a new version of the decrypted value + // if this happens, we error as this may be a bug + for k, v := range secretMap { + if plaintext, ok := c.cache[k]; ok && plaintext != v { + return nil, fmt.Errorf("inconsistent decryption value found for cipertext: %q", k) + } + + c.cache[k] = v + } + + return secretMap, nil +} + +func (c *cachedDecrypter) DecryptValue(ciperText string) (string, error) { + if plainText, ok := c.cache[ciperText]; ok { + return plainText, nil + } + + // The value is not currently in the cache so we need to decrypt it + // and add it to 
the cache + plainText, err := c.decrypter.DecryptValue(ciperText) + if err != nil { + return "", err + } + + if c.cache == nil { + c.cache = make(map[string]string) + } + c.cache[ciperText] = plainText + + return plainText, nil +} + // NewPanicCrypter returns a new config crypter that will panic if used. func NewPanicCrypter() Crypter { return &panicCrypter{} @@ -115,11 +210,15 @@ func NewPanicCrypter() Crypter { type panicCrypter struct{} -func (p panicCrypter) EncryptValue(plaintext string) (string, error) { +func (p panicCrypter) EncryptValue(_ string) (string, error) { panic("attempt to encrypt value") } -func (p panicCrypter) DecryptValue(ciphertext string) (string, error) { +func (p panicCrypter) BulkDecrypt(_ []string) (map[string]string, error) { + return nil, nil +} + +func (p panicCrypter) DecryptValue(_ string) (string, error) { panic("attempt to decrypt value") } @@ -133,7 +232,7 @@ func NewSymmetricCrypter(key []byte) Crypter { // NewSymmetricCrypterFromPassphrase uses a passphrase and salt to generate a key, and then returns a crypter using it. func NewSymmetricCrypterFromPassphrase(phrase string, salt []byte) Crypter { // Generate a key using PBKDF2 to slow down attempts to crack it. 
1,000,000 iterations was chosen because it - // took a little over a second on an i7-7700HQ Quad Core procesor + // took a little over a second on an i7-7700HQ Quad Core processor key := pbkdf2.Key([]byte(phrase), salt, 1000000, SymmetricCrypterKeyBytes, sha256.New) return NewSymmetricCrypter(key) } @@ -175,6 +274,21 @@ func (s symmetricCrypter) DecryptValue(value string) (string, error) { return decryptAES256GCM(enc, s.key, nonce) } +func (s symmetricCrypter) BulkDecrypt(ciphertexts []string) (map[string]string, error) { + secretMap := map[string]string{} + for _, c := range ciphertexts { + if _, ok := secretMap[c]; ok { + continue + } + v, err := s.DecryptValue(c) + if err != nil { + return nil, err + } + secretMap[c] = v + } + return secretMap, nil +} + // encryptAES256GCGM returns the ciphertext and the generated nonce func encryptAES256GCGM(plaintext string, key []byte) ([]byte, []byte) { contract.Requiref(len(key) == SymmetricCrypterKeyBytes, "key", "AES-256-GCM needs a 32 byte key") @@ -226,3 +340,18 @@ func (c prefixCrypter) DecryptValue(ciphertext string) (string, error) { func (c prefixCrypter) EncryptValue(plaintext string) (string, error) { return c.prefix + plaintext, nil } + +func (c prefixCrypter) BulkDecrypt(ciphertexts []string) (map[string]string, error) { + secretMap := map[string]string{} + for _, cip := range ciphertexts { + if _, ok := secretMap[cip]; ok { + continue + } + v, err := c.DecryptValue(cip) + if err != nil { + return nil, err + } + secretMap[cip] = v + } + return secretMap, nil +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/key.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/key.go index 76404dc..c1bfb06 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/key.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/key.go @@ -31,7 +31,7 @@ type Key struct { // MustMakeKey constructs a config.Key for a given namespace and name. 
The namespace may not contain a `:` func MustMakeKey(namespace string, name string) Key { - contract.Requiref(!strings.Contains(":", namespace), "namespace", "may not contain a colon") + contract.Requiref(!strings.Contains(namespace, ":"), "namespace", "may not contain a colon") return Key{namespace: namespace, name: name} } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go index 496af0f..c149449 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/codes" "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging" "github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror" @@ -56,18 +57,10 @@ func NewLanguageRuntime(host Host, ctx *Context, runtime string, }) } - var args []string - for k, v := range options { - args = append(args, fmt.Sprintf("-%s=%v", k, v)) - } - - root, err := filepath.Abs(ctx.Root) + args, err := buildArgsForNewPlugin(host, ctx, options) if err != nil { return nil, err } - args = append(args, fmt.Sprintf("-root=%s", filepath.Clean(root))) - - args = append(args, host.ServerAddr()) plug, err := newPlugin(ctx, ctx.Pwd, path, runtime, args, nil /*env*/) if err != nil { @@ -83,6 +76,29 @@ func NewLanguageRuntime(host Host, ctx *Context, runtime string, }, nil } +func buildArgsForNewPlugin(host Host, ctx *Context, options map[string]interface{}) ([]string, error) { + root, err := filepath.Abs(ctx.Root) + if err != nil { + return nil, err + } + var args []string + + for k, v := range options { + args = append(args, fmt.Sprintf("-%s=%v", k, v)) + } + + args = 
append(args, fmt.Sprintf("-root=%s", filepath.Clean(root))) + + if cmdutil.IsTracingEnabled() { + args = append(args, fmt.Sprintf("-tracing=%s", cmdutil.TracingEndpoint)) + } + + // NOTE: positional argument for the server address must come last + args = append(args, host.ServerAddr()) + + return args, nil +} + func NewLanguageRuntimeClient(ctx *Context, runtime string, client pulumirpc.LanguageRuntimeClient) LanguageRuntime { return &langhost{ ctx: ctx, @@ -131,10 +147,10 @@ func (h *langhost) GetRequiredPlugins(info ProgInfo) ([]workspace.PluginInfo, er return nil, errors.Errorf("unrecognized plugin kind: %s", info.GetKind()) } results = append(results, workspace.PluginInfo{ - Name: info.GetName(), - Kind: workspace.PluginKind(info.GetKind()), - Version: version, - ServerURL: info.GetServer(), + Name: info.GetName(), + Kind: workspace.PluginKind(info.GetKind()), + Version: version, + PluginDownloadURL: info.GetServer(), }) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go index 5e2e8db..9b302d6 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ import ( "syscall" "time" + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" multierror "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "golang.org/x/net/context" @@ -43,9 +44,10 @@ import ( ) // PulumiPluginJSON represents additional information about a package's associated Pulumi plugin. -// For Python, the content is inside a pulumiplugin.json file inside the package. 
+// For Python, the content is inside a pulumi-plugin.json file inside the package. // For Node.js, the content is within the package.json file, under the "pulumi" node. -// This is not currently used for .NET or Go, but we could consider adopting it for those languages. +// For .NET, the content is inside a pulumi-plugin.json file inside the NuGet package. +// For Go, the content is inside a pulumi-plugin.json file inside the module. type PulumiPluginJSON struct { // Indicates whether the package has an associated resource plugin. Set to false to indicate no plugin. Resource bool `json:"resource"` @@ -62,7 +64,7 @@ func (plugin *PulumiPluginJSON) JSON() ([]byte, error) { if err != nil { return nil, err } - return json, nil + return append(json, '\n'), nil } func LoadPulumiPluginJSON(path string) (*PulumiPluginJSON, error) { @@ -73,7 +75,7 @@ func LoadPulumiPluginJSON(path string) (*PulumiPluginJSON, error) { return nil, err } - var plugin *PulumiPluginJSON + plugin := &PulumiPluginJSON{} if err := json.Unmarshal(b, plugin); err != nil { return nil, err } @@ -112,7 +114,7 @@ var errRunPolicyModuleNotFound = errors.New("pulumi SDK does not support policy // errPluginNotFound is returned when we try to execute a plugin but it is not found on disk. var errPluginNotFound = errors.New("plugin not found") -func newPlugin(ctx *Context, pwd, bin, prefix string, args, env []string) (*plugin, error) { +func newPlugin(ctx *Context, pwd, bin, prefix string, args, env []string, options ...otgrpc.Option) (*plugin, error) { if logging.V(9) { var argstr string for i, arg := range args { @@ -148,26 +150,30 @@ func newPlugin(ctx *Context, pwd, bin, prefix string, args, env []string) (*plug for { msg, readerr := reader.ReadString('\n') - if readerr != nil { - break - } - - // We may be trying to run a plugin that isn't present in the SDK installed with the Policy Pack. - // e.g. the stack's package.json does not contain a recent enough @pulumi/pulumi. 
- // - // Rather than fail with an opaque error because we didn't get the gRPC port, inspect if it - // is a well-known problem and return a better error as appropriate. - if strings.Contains(msg, "Cannot find module '@pulumi/pulumi/cmd/run-policy-pack'") { - sawPolicyModuleNotFoundErr = true - } + // Even if we've hit the end of the stream, we want to check for non-empty content. + // The reason is that if the last line is missing a \n, we still want to include it. if strings.TrimSpace(msg) != "" { + // We may be trying to run a plugin that isn't present in the SDK installed with the Policy Pack. + // e.g. the stack's package.json does not contain a recent enough @pulumi/pulumi. + // + // Rather than fail with an opaque error because we didn't get the gRPC port, inspect if it + // is a well-known problem and return a better error as appropriate. + if strings.Contains(msg, "Cannot find module '@pulumi/pulumi/cmd/run-policy-pack'") { + sawPolicyModuleNotFoundErr = true + } + if stderr { ctx.Diag.Infoerrf(diag.StreamMessage("" /*urn*/, msg, errStreamID)) } else { ctx.Diag.Infof(diag.StreamMessage("" /*urn*/, msg, outStreamID)) } } + + // If we've hit the end of the stream, break out and close the channel. + if readerr != nil { + break + } } close(done) @@ -278,22 +284,14 @@ func newPlugin(ctx *Context, pwd, bin, prefix string, args, env []string) (*plug // execPlugin starts the plugin executable. func execPlugin(bin string, pluginArgs []string, pwd string, env []string) (*plugin, error) { - var args []string - // Flow the logging information if set. - if logging.LogFlow { - if logging.LogToStderr { - args = append(args, "--logtostderr") - } - if logging.Verbose > 0 { - args = append(args, "-v="+strconv.Itoa(logging.Verbose)) - } - } - // Flow tracing settings if we are using a remote collector. - if cmdutil.TracingEndpoint != "" && !cmdutil.TracingToFile { - args = append(args, "--tracing", cmdutil.TracingEndpoint) - } - args = append(args, pluginArgs...) 
- + args := buildPluginArguments(pluginArgumentOptions{ + pluginArgs: pluginArgs, + tracingEndpoint: cmdutil.TracingEndpoint, + tracingToFile: cmdutil.TracingToFile, + logFlow: logging.LogFlow, + logToStderr: logging.LogToStderr, + verbose: logging.Verbose, + }) cmd := exec.Command(bin, args...) cmdutil.RegisterProcessGroup(cmd) cmd.Dir = pwd @@ -332,6 +330,32 @@ func execPlugin(bin string, pluginArgs []string, pwd string, env []string) (*plu }, nil } +type pluginArgumentOptions struct { + pluginArgs []string + tracingEndpoint string + tracingToFile, logFlow, logToStderr bool + verbose int +} + +func buildPluginArguments(opts pluginArgumentOptions) []string { + var args []string + // Flow the logging information if set. + if opts.logFlow { + if opts.logToStderr { + args = append(args, "--logtostderr") + } + if opts.verbose > 0 { + args = append(args, "-v="+strconv.Itoa(opts.verbose)) + } + } + // Flow tracing settings if we are using a remote collector. + if opts.tracingEndpoint != "" && !opts.tracingToFile { + args = append(args, "--tracing", opts.tracingEndpoint) + } + args = append(args, opts.pluginArgs...) + return args +} + func (p *plugin) Close() error { if p.Conn != nil { contract.IgnoreClose(p.Conn) diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider-implementers-guide.md b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider-implementers-guide.md deleted file mode 100644 index 760ed35..0000000 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider-implementers-guide.md +++ /dev/null @@ -1,617 +0,0 @@ -# Resource Provider Implementer's Guide - -## Provider Programming Model - -### Resources - -The core functionality of a resource provider is the management of custom resources and -construction of component resources within the scope of a Pulumi stack. 
Custom resources -have a well-defined lifecycle built around the differences between their acutal state and -the desired state described by their inputs and implemented using create, read, update, -and delete (CRUD) operations defined by the provider. Component resources have no -associated lifecycle, and are constructed by registering child custom or component -resources with the Pulumi engine. - -#### URNs - -Each resource registered with the Pulumi engine is logically identified by its -uniform resource name (URN). A resource's URN is derived from the its type, parent type, -and user-supplied name. Within the scope of a resource-related provider method -([`Check`](#check), [`Diff`](#diff), [`Create`](#create), [`Read`](#read), -[`Update`](#update), [`Delete`](#delete), and [`Construct`](#construct)), the type of -the resource can be extracted from the provided URN. The structure of a URN is defined -by the grammar below. - -```ebnf -urn = "urn:pulumi:" stack "::" project "::" qualified type name "::" name ; - -stack = string ; -project = string ; -name = string ; -string = (* any sequence of unicode code points that does not contain "::" *) ; - -qualified type name = [ parent type "$" ] type ; -parent type = type ; - -type = package ":" [ module ":" ] type name ; -package = identifier ; -module = identifier ; -type name = identifier ; -identifier = unicode letter { unicode letter | unicode digit | "_" } ; -``` - -#### Custom Resources - -In addition to its URN, each custom resource has an associated ID. This ID is opaque to -the Pulumi engine, and is only meaningful to the provider as a means to identify a -physical resource. The ID must be a string. The empty ID indicates that a resource's ID -is not known because it has not yet been created. Critically, a custom resource has a -[well-defined lifecycle](#custom-resource-lifecycle) within the scope of a Pulumi stack. - -#### Component Resources - -A component resource is a logical conatiner for other resources. 
Besides its URN, a -component resource has a set of inputs, a set of outputs, and a tree of children. Its -only lifecycle semantics are those of its children; its inputs and outputs are not -related in the same way a [custom resource's](#custom-resources) inputs and state are -related. The engine can call a resource provider's [`Construct`](#construct) method to -request that the provider create a component resource of a particular type. - -### Functions - -A provider function is a function implemented by a provider, and has access to any of the -provider's state. Each function has a unique token, optionally accepts an input object, -and optionally produces an output object. The data passed to and returned from a function -must not be [unknown](#unknowns) or [secret](#secrets), and must not -[refer to resources](#resource-references). Note that an exception to these rules is made -for component resource methods, which may accept values of any type, and are provided -with a connection to the Pulumi engine. - -### Data Exchange Types - -The values exchanged between Pulumi resource providers and the Pulumi engine are a -superset of the values expressible in JSON. 
- -Pulumi supports the following data types: - -- `Null`, which represents the lack of a value -- `Bool`, which represents a boolean value -- `Number`, which represents an IEEE-754 double-precision number -- `String`, which represents a sequence of UTF-8 encoded unicode code points -- `Array`, which represents a numbered sequence of values -- `Object`, which represents an unordered map from strings to values -- [`Asset`](#assets-and-archives), which represents a blob -- [`Archive`](#assets-and-archives), which represents a map from strings to `Asset`s or - `Archive`s -- [`ResourceReference`](#resource-references), which represents a reference to a [Pulumi - resource](#resources) -- [`Unknown`](#unknowns), which represents a value whose type and concrete value are not - known -- [`Secret`](#secrets), which demarcates a value whose contents are sensitive - -#### Assets and Archives - -An `Asset` or `Archive` may contain either literal data or a reference to a file or URL. -In the former case, the literal data is a textual string or a map from strings to `Asset`s -or `Archive`s, respectively. In the latter case, the referenced file or URL is an opaque -blob or a TAR, gzipped TAR, or ZIP archive, respectively. - -Each `Asset` or `Archive` also carries the SHA-256 hash of its contents. This hash can be -used to uniquely identify the asset (e.g. for locally caching `Asset` or `Archive` -contents). - -#### Resource References - -A `ResourceReference` represents a reference to a [Pulumi resource](#Resources). Although -all that is necessary to uniquely identify a resource is its URN, a `ResourceReference` -also carries the resource's ID (if it is a [custom resource](#custom-resources)) and the -version of the provider that manages the resource. If the contents of the referenced -resource must be inspected, the reference must be resolved by invoking the `getResource` -function of the engine's builtin provider. 
Note that this is only possible if there is a -connection to the engine's resource monitor, e.g. within the scope of a call to `Construct`. -This implies that resource references may not be resolved within calls to other -provider methods. Therefore, configuration vales, custom resources and provider functions -should not rely on the ability to resolve resource references, and should instead treat -resource references as either their ID (if present) or URN. If the ID is present and -empty, it should be treated as an [`Unknown`](#unknowns). - -#### Unknowns - -An `Unknown` represents a value whose type and concrete value are not known. Resources -typically produce these values during [previews](#preview) for properties with values -that cannot be determined until the resource is actually created or updated. -[Functions](#functions) must not accept or return unknown values. - -#### Secrets - -A `Secret` represents a value whose contents are sensitive. Values of this type are -merely wrappers around the sensitive value. A provider should take care not to leak a -secret value. and should wrap any resource output values that are always sensitive in a -`Secret`. [Functions](#functions) must not accept or return secret values. - -#### Property Paths - -TODO: write this up - -## Schema - -TODO: document the Pulumi schema model. - -- configuration -- types -- resources -- functions - -## Provider Lifecycle - -Clients of a provider (e.g. the Pulumi CLI) must obey the provider lifecycle. This -lifecycle guarantees that a provider is configured before any resource operations are -performed or provider functions are invoked. The lifecycle of a provider instance is -described in brief below. - -1. The user [looks up](#lookup) the factory for a particular `(package, semver)` tuple - and uses the factory to create a provider instance. -2. The user [configures](#configuration) the provider instance with a particular - configuration object. -3. 
The user performs resource operations and/or calls provider functions with the - provider instance. -4. The user [shuts down](#shutdown) the provider instance. - -Within the scope of a Pulumi stack, each provider instance has a corresponding provider -resource. Provider resources are custom resources that are managed by the Pulumi engine, -and obey the usual [custom resource lifecycle](#custom-resource-lifecycle). The `Check` -and `Diff` methods for a provider resource are implemented using the -[`CheckConfig`](#checkconfig) and [`DiffConfig`](#diffconfig) methods of the resource's -provider instance. The latter is criticially important to the user experience: if -[`DiffConfig`](#diffconfig) indicates that the provider resource must be replaced, all of -the custom resources managed by the provider resource will _also_ be replaced. Thus, -`DiffConfig` should only indicate that replacement is required if the provider's -new configuration prevents it from managing resources associated with its old -configuration. - -### Lookup - -Before a provider can be used, it must be instantiated. Instatiating a provider requires -a `(package, semver)` tuple, which is used to find an appropriate provider factory. The -lookup process proceeds as follows: - -- Let the best available factory `B` be empty -- For each available provider factory `F` with package name `package`: - - If the `F`'s version is compatible with `semver`: - - If `B` is empty or if `F`'s version is newer than `B`'s version, set `B` to `F` -- If `B` is empty, no compatible factory is available, and lookup fails - -Within the context of the Pulumi CLI, the list of available factories is the list of -installed resource plugins plus the builtin `pulumi` provider. The list of installed -resource plugins can be viewed by running `pulumi plugin ls`. - -Once an appropriate factory has been found, it is used to construct a provider instance. - -### Configuration - -A provider may accept a set of configuration variables. 
After a provider is instantiated, -the instance must be configured before it may be used, even if its set of configuration -variables is empty. Configuration variables may be of [any type](#data-exchange-types). -Because it has no connection to the Pulumi engine during configuration, a provider's -configuration variables should not rely on the ability to resolve -[resource references](#resource-references). - -In general, a provider's configuration variables define the set of resources it is able -to manage: for example, the `aws` provider accepts the AWS region to use as a -configuration variable, which prevents a particular instance of the provider from -managing AWS resources in other regions. As noted in the [overview](#provider-lifecycle), -changes to a provider's configuration that prevent the provider from managing resources -that were created with its old configuration should require that those resources are -destroyed and recreated. - -Provider configuration is performed in at most three steps: - -1. [`CheckConfig`](#checkconfig), which validates configuration values and applies - defaults computed by the provider. This step is only required when configuring a - provider using user-supplied values, and can be skipped when using values that were - previously processed by `CheckConfig`. -2. [`DiffConfig`](#diffconfig), which indicates whether or not the new configuration can - be used to manage resources created with the old configuration. Note that this step is - only applicable within contexts where new and old configuration exist (e.g. during a - [preview](#preview) or [update](#update) of a Pulumi stack). -3. [`Configure`](#configure), which applies the inputs validated by `CheckConfig`. - -#### CheckConfig - -`CheckConfig` implements the semantics of a custom resource's [`Check`](#check) method, -with provider configuration in the place of resource inputs. 
Each call to `CheckConfig` is -provided with the provider's prior checked configuration (if any) and the configuration -supplied by the user. The provider may reject configuration values that do not conform to -the provider's schema, and may apply default values that are not statically computable. -The type of a computed default value for a property should agree with the property's -schema. - -#### DiffConfig - -`DiffConfig` implements the semantics of a custom resource's [`Diff`](#diff) method, -with provider configuration in the place of resource inputs and state. Each call to -`DiffConfig` is provided with the provider's prior and current configuration. If there -are any changes to the provider's configuration, those changes should be reflected in the -result of `DiffConfig`. If there are changes to the configuration that make the provider -unable to manage resources created using the prior configuration (e.g. changing an AWS -provider instance's region), `DiffConfig` should indicate that the provider must be -replaced. Because replacing a provider will require that all of the resources with -which it is associated are _also_ replaced, replacement semantics should be reserved -for changes to configuration properties that are guaranteed to make old resources -unmanagable (e.g. a change to an AWS access key should not require replacement, as the -set of resources accesible via an access key is easily knowable). - -#### Configure - -`Configure` applies a set of checked configuration values to a provider instance. Within -a call to `Configure`, a provider instance should use its configuration values to create -appropriate SDK instances, check connectivity, etc. If configuration fails, the provider -should return an error. - -##### Parameters - -- `inputs`: the configuration `Object` for the provider. This value may contain - [`Unknown`](#unknowns) values if the provider is being configured during a - [preview](#preview). 
In this case, the provider should provide as much - functionality as possible. - -##### Results - -None. - -### Shutdown - -Once a client has finished using a resource provider, it must shut the provider down. -A client requests that a provider shut down gracefully by calling its `SignalCancellation` -method. In response to this method, a provider should cancel all outstanding resource -operations and funtion calls. After calling `SignalCancellation`, the client calls -`Close` to inform the provider that it should release any resources it holds. - -`SignalCancellation` is advisory and non-blocking; it is up to the client to decide how -long to wait after calling `SignalCancellation` to call `Close`. - -## Custom Resource Lifecycle - -A custom resource has a well-defined lifecycle within the scope of a Pulumi stack. When a -custom resource is registered by a Pulumi program, the Pulumi engine first determines -whether the resource is being read, imported, or managed. Each of these operations -involves a different interaction with the resource's provider. - -If the resource is being read, the engine calls the resource's provider's [`Read`](#read) method -to fetch the resource's current state. This call to [`Read`](#read) includes the resource's ID and -any state provided by the user that may be necessary to read the resource. - -If the resource is being imported, the engine first calls the provider's [`Read`](#read) method -to fetch the resource's current state and inputs. This call to [`Read`](#read) only inclues the -ID of the resource to import; that is, _any importable resource must be identifiable using -its ID alone_. If the [`Read`](#read) succeeds, the engine calls the provider's [`Check`](#check) method with -the inputs returned by [`Read`](#read) and the inputs supplied by the user. If any of the inputs -are invalid, the import fails. 
Finally, the engine calls the provider's [`Diff`](#diff) method with -the inputs returned by [`Check`](#check) and the state returned by [`Read`](#read). If the call to [`Diff`](#diff) -indicates that there is no difference between the desired state described by the inputs -and the actual state, the import succeeds. Otherwise, the import fails. - -If the resource is being managed, the engine first looks up the last registered inputs and -last refreshed state for the resource's URN. The engine then calls the resource's -provider's [`Check`](#check) method with the last registered inputs (if any) and the inputs supplied -by the user. If any of the inputs are invalid, the registration fails. Otherwise, the -engine decides which operations to perform on the resource based on the difference between -the desired state described by its inputs and its actual state. If the resource does not -exist (i.e. there is no last refereshed state for its URN), the engine calls the -provider's [`Create`](#create) method, which returns the ID and state of the created resource. If the -resource does exist, the action taken depends on the differences (if any) between the -desired and actual state of the resource. - -If the resource does exist, the engine calls the provider's [`Diff`](#diff) method with the -inputs returned from [`Check`](#check), the resource's ID, and the resource's last refreshed state. -If the result of the call indicates that there is no difference between the desired and -actual state, no operation is necessary. Otherwise, the resource is either updated (if -[`Diff`](#diff) does not indicate that the resource must be replaced) or replaced (if [`Diff`](#diff) does -indicate that the resource must be replaced). - -To update a resource, the engine calls the provider's [`Update`](#update) method with the inputs -returned from [`Check`](#check), the resource's ID, and its last refreshed state. [`Update`](#update) returns -the new state of the resource. 
The resource's ID may not be changed by a call to [`Update`](#update). - -To replace a resource, the engine first calls [`Check`](#check) with an empty set of prior inputs -and the inputs supplied with the resource's registration. If [`Check`](#check) fails, the resource -is not replaced. Otherwise, the inputs returned by this call to [`Check`](#check) will be used to -create the replacement resource. Next, the engine inspects the resource options supplied -with the resource's registration and result of the call to [`Diff`](#diff) to determine whether -the replacement can be created before the original resource is deleted. This order of -operations is preferred when possible to avoid downtime due to the lag between the -deletion of the current resource and creation of its replacement. If the replacement may -be created before the original is deleted, the engine calls the provider's [`Create`](#create) method -with the re-checked inputs, then later calls [`Delete`](#delete) with the resource's ID and original -state. If the resource must be deleted before its replacement can be created, the engine -first deletes the transitive closure of resource that depend on the resource being -replaced. Once these deletes have completed, the engine deletes the original resource by -calling the provider's [`Delete`](#delete) method with the resource's ID and original state. Finally, -the engine creates the replacement resource by calling [`Create`](#create) with the re-checked -inputs. - -If a managed resource registered by a Pulumi program is not re-registered by the next -successful execution of a Pulumi progam in the resource's stack, the engine deletes the -resource by calling the resource's provider's [`Delete`](#delete) method with the resource's ID and -last refereshed state. - -The diagram below summarizes the custom resource lifecycle. Detailed descriptions of each -resource operation follow. 
- -![Custom Resource Lifecycle Diagram](./resource_lifecycle.svg) - -### Lifecycle Methods - -#### Check - -The `Check` method is responsible for validating the inputs to a resource. It may -optionally apply default values for unspecified input properties that cannot reasonably -be computed outside the provider (e.g. because they require access to the provider's -internal data structures). - -##### Parameters - -- `urn`: the [URN](#urns) of the resource. -- `olds`: the last recorded input `Object` for the resource, if any. If present, these - inputs must have been generated by a prior call to `Check` or [`Read`](#read). - These inputs will never contain [`Unknown`s](#unknowns). -- `news`: the new input `Object` for the resource. These inputs may have been provided by - the user or generated by a call to [`Read`](#read), and may contain - [`Unknown`s](#unknowns). - -##### Results - -- `inputs`: the checked input `Object` for the resource with default values applied. The - types of the properties in `inputs` should agree with the types of the - resource's input properties as described in its [schema](#schema). If `news` - contains [`Unknown`s](#unknowns), `inputs` may contain [`Unknown`s](#unknowns). -- `failures`: any validation failures present in the inputs. These failures should be - constrained to type and range mismatches. A failure is a tuple of a - [property path](#property-paths) and a failure reason. - -#### Diff - -The `Diff` method is responsible for calculating the differences between the actual and -desired state of a resource as represented by its last recorded state and new input -`Object` as returned from [`Check`](#check) or [`Read`](#read) and the logical -operation necessary to reconcile the two (i.e. no operation, an `Update`, or a `Replace`). - -##### Parameters - -- `urn`: the [URN](#urns) of the resource. -- `id`: the [ID](#custom-resources) of the resource. -- `olds`: the last recorded state `Object` for the resource. 
This `Object` must have been - generated by a call to `Create`, `Read`, or `Update`, and will never contain - [`Unknown`s](#unknowns). -- `news`: the current input `Object` for the resource as returned by [`Check`](#check) or - [`Read`](#read). This value may contain [`Unknown`s](#unknowns). -- `ignoreChanges`: the set of [property paths](#property-paths) to treat as unchanged. - -##### Results - -- `detailedDiff`: the [detailed diff](#detailed-diffs) between the resource's actual and - desired state. -- `deleteBeforeReplace`: if true, the resource must be deleted before it is recreated. - This flag is ignored if `detailedDiff` does not indicate that - the resource needs to be replaced. -- `changes`: an enumeration that indicates whether the provider detected any changes, - detected no changes, or does not support detailed diff detection. Providers - should return `Some` for this value if there are any entries in - `detailedDiff`; otherwise they should return `None` to indicate no - difference. If a provider returns `Unknown` for this value, it is the - responsibility of the client to determine whether or not differences exist - by comparing the resource's last recorded _inputs_ with its current inputs. - -In addition, the following properties should be returned for compatibility with older -clients: - -- `replaceKeys`: the list of top-level input property names with changes that require that the - resource be replaced. -- `stableKeys`: the list of top-level input property names that did not change and - top-level output properties that are guaranteed not to change. -- `changedKeys`: the list of top-level input property names that changed. - -If a provider is unable to compute a diff because its configuration contained -[`Unknown`s](#unknowns), it can return an error that indicates as such. The client should -conservatively assume that the resource must be updated and warn the user. 
- -#### Detailed Diffs - -A detailed diff is a map from [property paths](#property-paths) to change kinds that -describes the differences between the actual and desired state of a resource and the -operations necessary to reconcile the two. - -Each entry in a detailed diff has a change kind that describes how the value of an -input property differs, whether or not the difference requires replacement, and which old -value was used for determining the difference. The core change kinds are: - -- `Add`, which denotes an `Object` property or `Array` element that was added -- `Update`, which denotes an `Object` property or `Array` element that was updated -- `Delete`, which denotes an `Object` property or `Array` element that was removed - -Each of these core kinds is parameterized on whether or not the change requires -replacement and whether the old value of the property was read from the -resource's old input `Object` or old state `Object`. - -*TODO*: the input/output flag is a bit clumsy, as it is the only part of the system -that implies some correspondence between input and output `Object` schemas. It was -chosen over an approach that used old/new values in order to remove the possibility -of a provider accidentally revealing a secret value as part of a diff. We should -reconsider this approach if we can find an easy way to maintain secretness. - -#### Create - -The `Create` method is responsible for creating a new instance of a resource from an -input `Object` and returning the resource's state `Object`. `Create` may be called during -a [preview](#preview) in order to compute a hypothetical state `Object` without actually -creating the resource, in which case the `preview` argument will be `true`. - -##### Parameters - -- `urn`: the [URN](#urns) of the resource. -- `news`: the input `Object` for the resource. This value must have been generated by a - prior call to `Check`. 
If `preview` is true, this value may contain - [`Unknown`](#unknowns) values; otherwise, it is guaranteed to be fully-known. -- `timeout`: the timeout for the create operation. If this value is `0`, the provider - should apply the default creation timeout for the resource. -- `preview`: if true, the provider should calculate the state `Object` as accurately as it - is able without actually creating the resource. Top-level properties that - are present in the resource's [schema](#schema) but are omitted from its - state `Object` should be treated as having the value [`Unknown`](#unknowns). - Nested properties with values that are not computable must be explicitly set - to [`Unknown`](#unknowns). If it is not possible to guarantee that the value - produced by a preview will match the value that would be produced by actually - creating the resource, the value should be left unknown. - -##### Results - -- `id`: the ID for the created resource. If `preview` is true, this value will be ignored. -- `state`: the new state `Object` for the resource. If `preview` is true, this value may - contain [`Unknown`s](#unknowns). - -#### Update - -The `Update` method is responsible for updating a resource in-place given its -last recorded state `Object` and current input `Object`. `Update` may be called during -a [preview](#preview) in order to compute a hypothetical state `Object` without actually -updating the resource, in which case the `preview` argument will be `true`. - -##### Parameters - -- `urn`: the [URN](#urns) of the resource. -- `id`: the [ID](#custom-resources) of the resource. -- `olds`: the last recorded state `Object` for the resource. This `Object` must have been - generated by a call to `Create`, `Read`, or `Update`. -- `news`: the input `Object` for the resource. This value must have been generated by a - prior call to `Check`. If `preview` is true, this value may contain - [`Unknown`](#unknowns) values; otherwise, it is guaranteed to be fully-known. 
-- `timeout`: the timeout for the update operation. If this value is `0`, the provider - should apply the default update timeout for the resource. -- `ignoreChanges`: the set of [property paths](#property-paths) to treat as unchanged. -- `preview`: if true, the provider should calculate the state `Object` as accurately as it - is able without actually updating the resource. Top-level properties that - are present in the resource's [schema](#schema) but are omitted from its - state `Object` should be treated as having the value [`Unknown`](#unknowns). - Nested properties with values that are not computable must be explicitly set - to [`Unknown`](#unknowns). If it is not possible to guarantee that the value - produced by a preview will match the value that would be produced by actually - updating the resource, the value should be left unknown. - -##### Results - -- `state`: the new state `Object` for the resource. If `preview` is true, this value may - contain [`Unknown`s](#unknowns). - -#### Read - -The `Read` method is responsible for reading the current inputs and state `Object`s for a -resource. `Read` may be called during a [refresh](#refresh) or [import](#import) of a -managed resource or during a [preview](#preview) or [update](#update) for an external -resource. - -##### Parameters - -- `urn`: the [URN](#urns) of the resource. -- `id`: the [ID](#custom-resources) of the resource. -- `inputs`: the last recorded input `Object` for the resource, if any. If present, this - `Object` must have been generated by a call to `Check` or `Read`. This - parameter is omitted if the resource is being [imported](#import). -- `state`: the last recorded state `Object` for the resource, if any. This `Object` must - have been generated by a call to `Create`, `Read`, or `Update`. This property - is only present during a [refresh](#refresh), and must not be required for a - resource to support [importing](#import). 
- -##### Results - -- `newInputs`: the new input `Object` for the resource. If the provider does not support - [detailed diffs](#detailed-diffs), these inputs may be used by the engine - to determine whether or not the resource's actual state differs from its - desired state during the next [preview](#preview) or [update](#update). - The shape of the returned `Object` should be compatible with the resource's - [schema](#schema). If the resource is being [imported](#import), an input - `Object` must be returned. Otherwise, unless the input `Object` is used for - computing default property values or the provider does not support - [detailed diffs](#detailed-diffs), `newInputs` should simply reflect the - value of `inputs`. -- `newState`: the new state `Object` for the resource. - -#### Delete - -The `Delete` method is responsible for deleting a resource given its ID and state -`Object`. - -##### Parameters - -- `urn`: the [URN](#urns) of the resource. -- `id`: the [ID](#custom-resources) of the resource. -- `state`: the last recorded state `Object` for the resource. This `Object` must have been - generated by a call to `Create`, `Read`, or `Update`. -- `timeout`: the timeout for the delete operation. If this value is `0`, the provider - should apply the default deletion timeout for the resource. - -##### Results - -None. - -## Component Resource Lifecycle - -- TODO: user-level programming model - -### Construct - -- TODO: brief, parameters, results, etc. 
- -## Provider Functions - -### Invoke - -- TODO - -### StreamInvoke - -- TODO - -## CLI Scenarios - -- TODO: - - preview - - update - - import - - refresh - - destroy - -### Preview - -- TODO: - - check - - diff - - create/update preview, read operation - -### Update - -- TODO: - - check - - diff - - create/update/read/delete operation - -### Import - -- TODO: read operation - -### Refresh - -- TODO: read operations - -### Destroy - -- TODO: delete operation - -## Appendix - -### Out-of-Process Plugin Lifecycle - -### gRPC Interface - -- TODO: - - feature negotiation - - data representation diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go index 40564ee..3cd27bb 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ package plugin import ( "errors" + "fmt" "io" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" @@ -56,7 +57,7 @@ type Provider interface { // Check validates that the given property bag is valid for a resource of the given type and returns the inputs // that should be passed to successive calls to Diff, Create, or Update for this resource. Check(urn resource.URN, olds, news resource.PropertyMap, - allowUnknowns bool) (resource.PropertyMap, []CheckFailure, error) + allowUnknowns bool, sequenceNumber int) (resource.PropertyMap, []CheckFailure, error) // Diff checks what impacts a hypothetical update will have on the resource's properties. 
Diff(urn resource.URN, id resource.ID, olds resource.PropertyMap, news resource.PropertyMap, allowUnknowns bool, ignoreChanges []string) (DiffResult, error) @@ -87,6 +88,10 @@ type Provider interface { tok tokens.ModuleMember, args resource.PropertyMap, onNext func(resource.PropertyMap) error) ([]CheckFailure, error) + // Call dynamically executes a method in the provider associated with a component resource. + Call(tok tokens.ModuleMember, args resource.PropertyMap, info CallInfo, + options CallOptions) (CallResult, error) + // GetPluginInfo returns this plugin's information. GetPluginInfo() (workspace.PluginInfo, error) @@ -151,6 +156,28 @@ func (d DiffKind) IsReplace() bool { } } +// AsReplace converts a DiffKind into the equivalent replacement if it not already +// a replacement. +func (d DiffKind) AsReplace() DiffKind { + switch d { + case DiffAdd: + return DiffAddReplace + case DiffAddReplace: + return DiffAddReplace + case DiffDelete: + return DiffDeleteReplace + case DiffDeleteReplace: + return DiffDeleteReplace + case DiffUpdate: + return DiffUpdateReplace + case DiffUpdateReplace: + return DiffUpdateReplace + default: + contract.Failf("Unknown diff kind %v", int(d)) + return DiffUpdateReplace + } +} + const ( // DiffAdd indicates that the property was added. DiffAdd DiffKind = 0 @@ -172,6 +199,15 @@ type PropertyDiff struct { InputDiff bool // True if this is a diff between old and new inputs rather than old state and new inputs. } +// ToReplace converts the kind of a PropertyDiff into the equivalent replacement if it not already +// a replacement. +func (p PropertyDiff) ToReplace() PropertyDiff { + return PropertyDiff{ + InputDiff: p.InputDiff, + Kind: p.Kind.AsReplace(), + } +} + // DiffResult indicates whether an operation should replace or update an existing resource. type DiffResult struct { Changes DiffChanges // true if this diff represents a changed resource. 
@@ -182,6 +218,72 @@ type DiffResult struct { DeleteBeforeReplace bool // if true, this resource must be deleted before recreating it. } +// Computes the detailed diff of Updated, Added and Deleted keys. +func NewDetailedDiffFromObjectDiff(diff *resource.ObjectDiff) map[string]PropertyDiff { + if diff == nil { + return map[string]PropertyDiff{} + } + out := map[string]PropertyDiff{} + objectDiffToDetailedDiff("", diff, out) + return out +} + +func objectDiffToDetailedDiff(prefix string, diff *resource.ObjectDiff, acc map[string]PropertyDiff) { + + getPrefix := func(k resource.PropertyKey) string { + if prefix == "" { + return string(k) + } + return fmt.Sprintf("%s.%s", prefix, string(k)) + } + + for k, vd := range diff.Updates { + nestedPrefix := getPrefix(k) + valueDiffToDetailedDiff(nestedPrefix, vd, acc) + } + + for k := range diff.Adds { + nestedPrefix := getPrefix(k) + acc[nestedPrefix] = PropertyDiff{Kind: DiffAdd} + } + + for k := range diff.Deletes { + nestedPrefix := getPrefix(k) + acc[nestedPrefix] = PropertyDiff{Kind: DiffDelete} + } +} + +func arrayDiffToDetailedDiff(prefix string, d *resource.ArrayDiff, acc map[string]PropertyDiff) { + nestedPrefix := func(i int) string { return fmt.Sprintf("%s[%d]", prefix, i) } + for i, vd := range d.Updates { + valueDiffToDetailedDiff(nestedPrefix(i), vd, acc) + } + for i := range d.Adds { + acc[nestedPrefix(i)] = PropertyDiff{Kind: DiffAdd} + } + for i := range d.Deletes { + acc[nestedPrefix(i)] = PropertyDiff{Kind: DiffDelete} + } + +} + +func valueDiffToDetailedDiff(prefix string, vd resource.ValueDiff, acc map[string]PropertyDiff) { + if vd.Object != nil { + objectDiffToDetailedDiff(prefix, vd.Object, acc) + } else if vd.Array != nil { + arrayDiffToDetailedDiff(prefix, vd.Array, acc) + } else { + switch { + case vd.Old.V == nil && vd.New.V != nil: + acc[prefix] = PropertyDiff{Kind: DiffAdd} + case vd.Old.V != nil && vd.New.V == nil: + acc[prefix] = PropertyDiff{Kind: DiffDelete} + default: + acc[prefix] = 
PropertyDiff{Kind: DiffUpdate} + } + } +} + // Replace returns true if this diff represents a replacement. func (r DiffResult) Replace() bool { for _, v := range r.DetailedDiff { @@ -222,12 +324,13 @@ type ReadResult struct { // ConstructInfo contains all of the information required to register resources as part of a call to Construct. type ConstructInfo struct { - Project string // the project name housing the program being run. - Stack string // the stack name being evaluated. - Config map[config.Key]string // the configuration variables to apply before running. - DryRun bool // true if we are performing a dry-run (preview). - Parallel int // the degree of parallelism for resource operations (<=1 for serial). - MonitorAddress string // the RPC address to the host resource monitor. + Project string // the project name housing the program being run. + Stack string // the stack name being evaluated. + Config map[config.Key]string // the configuration variables to apply before running. + ConfigSecretKeys []config.Key // the configuration keys that have secret values. + DryRun bool // true if we are performing a dry-run (preview). + Parallel int // the degree of parallelism for resource operations (<=1 for serial). + MonitorAddress string // the RPC address to the host resource monitor. } // ConstructOptions captures options for a call to Construct. @@ -253,3 +356,29 @@ type ConstructResult struct { // The resources that each output property depends on. OutputDependencies map[resource.PropertyKey][]resource.URN } + +// CallInfo contains all of the information required to register resources as part of a call to Construct. +type CallInfo struct { + Project string // the project name housing the program being run. + Stack string // the stack name being evaluated. + Config map[config.Key]string // the configuration variables to apply before running. + DryRun bool // true if we are performing a dry-run (preview). 
+ Parallel int // the degree of parallelism for resource operations (<=1 for serial). + MonitorAddress string // the RPC address to the host resource monitor. +} + +// CallOptions captures options for a call to Call. +type CallOptions struct { + // ArgDependencies is a map from argument keys to a list of resources that the argument depends on. + ArgDependencies map[resource.PropertyKey][]resource.URN +} + +// CallResult is the result of a call to Call. +type CallResult struct { + // The returned values, if the call was successful. + Return resource.PropertyMap + // A map from return value keys to the dependencies of the return value. + ReturnDependencies map[resource.PropertyKey][]resource.URN + // The failures if any arguments didn't pass verification. + Failures []CheckFailure +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go index 0636193..20c6046 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go @@ -25,12 +25,15 @@ import ( "github.com/blang/semver" pbempty "github.com/golang/protobuf/ptypes/empty" _struct "github.com/golang/protobuf/ptypes/struct" + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" multierror "github.com/hashicorp/go-multierror" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "google.golang.org/grpc/codes" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging" "github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror" @@ -63,8 +66,10 @@ type provider struct { cfgdone chan bool // closed when configuration has completed. 
acceptSecrets bool // true if this plugin accepts strongly-typed secrets. acceptResources bool // true if this plugin accepts strongly-typed resource refs. + acceptOutputs bool // true if this plugin accepts output values. supportsPreview bool // true if this plugin supports previews for Create and Update. disableProviderPreview bool // true if previews for Create and Update are disabled. + legacyPreview bool // enables legacy behavior for unconfigured provider previews. } // NewProvider attempts to bind to a given package's resource plugin and then creates a gRPC connection to it. If the @@ -91,12 +96,14 @@ func NewProvider(host Host, ctx *Context, pkg tokens.Package, version *semver.Ve } plug, err := newPlugin(ctx, ctx.Pwd, path, fmt.Sprintf("%v (resource)", pkg), - []string{host.ServerAddr()}, env) + []string{host.ServerAddr()}, env, otgrpc.SpanDecorator(decorateProviderSpans)) if err != nil { return nil, err } contract.Assertf(plug != nil, "unexpected nil resource plugin for %s", pkg) + legacyPreview := cmdutil.IsTruthy(os.Getenv("PULUMI_LEGACY_PROVIDER_PREVIEW")) + return &provider{ ctx: ctx, pkg: pkg, @@ -104,6 +111,7 @@ func NewProvider(host Host, ctx *Context, pkg tokens.Package, version *semver.Ve clientRaw: pulumirpc.NewResourceProviderClient(plug.Conn), cfgdone: make(chan bool), disableProviderPreview: disableProviderPreview, + legacyPreview: legacyPreview, }, nil } @@ -507,6 +515,7 @@ func (p *provider) Configure(inputs resource.PropertyMap) error { p.acceptSecrets = resp.GetAcceptSecrets() p.acceptResources = resp.GetAcceptResources() p.supportsPreview = resp.GetSupportsPreview() + p.acceptOutputs = resp.GetAcceptOutputs() p.cfgknown, p.cfgerr = true, err close(p.cfgdone) @@ -517,7 +526,8 @@ func (p *provider) Configure(inputs resource.PropertyMap) error { // Check validates that the given property bag is valid for a resource of the given type. 
func (p *provider) Check(urn resource.URN, - olds, news resource.PropertyMap, allowUnknowns bool) (resource.PropertyMap, []CheckFailure, error) { + olds, news resource.PropertyMap, + allowUnknowns bool, sequenceNumber int) (resource.PropertyMap, []CheckFailure, error) { label := fmt.Sprintf("%s.Check(%s)", p.label(), urn) logging.V(7).Infof("%s executing (#olds=%d,#news=%d", label, len(olds), len(news)) @@ -553,9 +563,10 @@ func (p *provider) Check(urn resource.URN, } resp, err := client.Check(p.requestContext(), &pulumirpc.CheckRequest{ - Urn: string(urn), - Olds: molds, - News: mnews, + Urn: string(urn), + Olds: molds, + News: mnews, + SequenceNumber: int32(sequenceNumber), }) if err != nil { rpcError := rpcerror.Convert(err) @@ -701,12 +712,26 @@ func (p *provider) Create(urn resource.URN, props resource.PropertyMap, timeout } // If this is a preview and the plugin does not support provider previews, or if the configuration for the provider - // is not fully known, hand back the inputs as the state. + // is not fully known, hand back an empty property map. This will force the language SDK will to treat all properties + // as unknown, which is conservatively correct. // - // Note that this can cause problems for the language SDKs if there are input and state properties that share a name - // but expect differently-shaped values. - if preview && (p.disableProviderPreview || !p.supportsPreview || !p.cfgknown) { - return "", props, resource.StatusOK, nil + // If the provider does not support previews, return the inputs as the state. Note that this can cause problems for + // the language SDKs if there are input and state properties that share a name but expect differently-shaped values. + if preview { + // TODO: it would be great to swap the order of these if statements. This would prevent a behavioral change for + // providers that do not support provider previews, which will always return the inputs as state regardless of + // whether or not the config is known. 
Unfortunately, we can't, since the `supportsPreview` bit depends on the + // result of `Configure`, which we won't call if the `cfgknown` is false. It may be worth fixing this catch-22 + // by extending the provider gRPC interface with a `SupportsFeature` API similar to the language monitor. + if !p.cfgknown { + if p.legacyPreview { + return "", props, resource.StatusOK, nil + } + return "", resource.PropertyMap{}, resource.StatusOK, nil + } + if !p.supportsPreview || p.disableProviderPreview { + return "", props, resource.StatusOK, nil + } } // We should only be calling {Create,Update,Delete} if the provider is fully configured. @@ -915,12 +940,26 @@ func (p *provider) Update(urn resource.URN, id resource.ID, } // If this is a preview and the plugin does not support provider previews, or if the configuration for the provider - // is not fully known, hand back the inputs as the state. + // is not fully known, hand back an empty property map. This will force the language SDK to treat all properties + // as unknown, which is conservatively correct. // - // Note that this can cause problems for the language SDKs if there are input and state properties that share a name - // but expect differently-shaped values. - if preview && (p.disableProviderPreview || !p.supportsPreview || !p.cfgknown) { - return news, resource.StatusOK, nil + // If the provider does not support previews, return the inputs as the state. Note that this can cause problems for + // the language SDKs if there are input and state properties that share a name but expect differently-shaped values. + if preview { + // TODO: it would be great to swap the order of these if statements. This would prevent a behavioral change for + // providers that do not support provider previews, which will always return the inputs as state regardless of + // whether or not the config is known. 
Unfortunately, we can't, since the `supportsPreview` bit depends on the + // result of `Configure`, which we won't call if the `cfgknown` is false. It may be worth fixing this catch-22 + // by extending the provider gRPC interface with a `SupportsFeature` API similar to the language monitor. + if !p.cfgknown { + if p.legacyPreview { + return news, resource.StatusOK, nil + } + return resource.PropertyMap{}, resource.StatusOK, nil + } + if !p.supportsPreview || p.disableProviderPreview { + return news, resource.StatusOK, nil + } } // We should only be calling {Create,Update,Delete} if the provider is fully configured. @@ -1068,6 +1107,9 @@ func (p *provider) Construct(info ConstructInfo, typ tokens.Type, name tokens.QN KeepUnknowns: true, KeepSecrets: p.acceptSecrets, KeepResources: p.acceptResources, + // To initially scope the use of this new feature, we only keep output values for + // Construct and Call (when the client accepts them). + KeepOutputValues: p.acceptOutputs, }) if err != nil { return ConstructResult{}, err @@ -1100,11 +1142,16 @@ func (p *provider) Construct(info ConstructInfo, typ tokens.Type, name tokens.QN for k, v := range info.Config { config[k.String()] = v } + configSecretKeys := []string{} + for _, k := range info.ConfigSecretKeys { + configSecretKeys = append(configSecretKeys, k.String()) + } resp, err := client.Construct(p.requestContext(), &pulumirpc.ConstructRequest{ Project: info.Project, Stack: info.Stack, Config: config, + ConfigSecretKeys: configSecretKeys, DryRun: info.DryRun, Parallel: int32(info.Parallel), MonitorEndpoint: info.MonitorAddress, @@ -1290,6 +1337,101 @@ func (p *provider) StreamInvoke( } } +// Call dynamically executes a method in the provider associated with a component resource. 
+func (p *provider) Call(tok tokens.ModuleMember, args resource.PropertyMap, info CallInfo, + options CallOptions) (CallResult, error) { + contract.Assert(tok != "") + + label := fmt.Sprintf("%s.Call(%s)", p.label(), tok) + logging.V(7).Infof("%s executing (#args=%d)", label, len(args)) + + // Get the RPC client and ensure it's configured. + client, err := p.getClient() + if err != nil { + return CallResult{}, err + } + + // If the provider is not fully configured, return an empty property map. + if !p.cfgknown { + return CallResult{}, nil + } + + margs, err := MarshalProperties(args, MarshalOptions{ + Label: fmt.Sprintf("%s.args", label), + KeepUnknowns: true, + KeepSecrets: true, + KeepResources: true, + // To initially scope the use of this new feature, we only keep output values for + // Construct and Call (when the client accepts them). + KeepOutputValues: p.acceptOutputs, + }) + if err != nil { + return CallResult{}, err + } + + // Marshal the arg dependencies. + argDependencies := map[string]*pulumirpc.CallRequest_ArgumentDependencies{} + for name, dependencies := range options.ArgDependencies { + urns := make([]string, len(dependencies)) + for i, urn := range dependencies { + urns[i] = string(urn) + } + argDependencies[string(name)] = &pulumirpc.CallRequest_ArgumentDependencies{Urns: urns} + } + + // Marshal the config. + config := map[string]string{} + for k, v := range info.Config { + config[k.String()] = v + } + + resp, err := client.Call(p.requestContext(), &pulumirpc.CallRequest{ + Tok: string(tok), + Args: margs, + ArgDependencies: argDependencies, + Project: info.Project, + Stack: info.Stack, + Config: config, + DryRun: info.DryRun, + Parallel: int32(info.Parallel), + MonitorEndpoint: info.MonitorAddress, + }) + if err != nil { + rpcError := rpcerror.Convert(err) + logging.V(7).Infof("%s failed: %v", label, rpcError.Message()) + return CallResult{}, rpcError + } + + // Unmarshal any return values. 
+ ret, err := UnmarshalProperties(resp.GetReturn(), MarshalOptions{ + Label: fmt.Sprintf("%s.returns", label), + KeepUnknowns: info.DryRun, + KeepSecrets: true, + KeepResources: true, + }) + if err != nil { + return CallResult{}, err + } + + returnDependencies := map[resource.PropertyKey][]resource.URN{} + for k, rpcDeps := range resp.GetReturnDependencies() { + urns := make([]resource.URN, len(rpcDeps.Urns)) + for i, d := range rpcDeps.Urns { + urns[i] = resource.URN(d) + } + returnDependencies[resource.PropertyKey(k)] = urns + } + + // And now any properties that failed verification. + var failures []CheckFailure + for _, failure := range resp.GetFailures() { + failures = append(failures, CheckFailure{resource.PropertyKey(failure.Property), failure.Reason}) + } + + logging.V(7).Infof("%s success (#ret=%d,#failures=%d) success", label, len(ret), len(failures)) + return CallResult{Return: ret, ReturnDependencies: returnDependencies, Failures: failures}, nil +} + // GetPluginInfo returns this plugin's information. 
func (p *provider) GetPluginInfo() (workspace.PluginInfo, error) { label := fmt.Sprintf("%s.GetPluginInfo()", p.label()) @@ -1446,3 +1588,30 @@ func (ie *InitError) Error() string { } return err.Error() } + +func decorateSpanWithType(span opentracing.Span, urn string) { + if urn := resource.URN(urn); urn.IsValid() { + span.SetTag("pulumi-decorator", urn.Type()) + } +} + +func decorateProviderSpans(span opentracing.Span, method string, req, resp interface{}, grpcError error) { + if req == nil { + return + } + + switch method { + case "/pulumirpc.ResourceProvider/Check", "/pulumirpc.ResourceProvider/CheckConfig": + decorateSpanWithType(span, req.(*pulumirpc.CheckRequest).Urn) + case "/pulumirpc.ResourceProvider/Diff", "/pulumirpc.ResourceProvider/DiffConfig": + decorateSpanWithType(span, req.(*pulumirpc.DiffRequest).Urn) + case "/pulumirpc.ResourceProvider/Create": + decorateSpanWithType(span, req.(*pulumirpc.CreateRequest).Urn) + case "/pulumirpc.ResourceProvider/Update": + decorateSpanWithType(span, req.(*pulumirpc.UpdateRequest).Urn) + case "/pulumirpc.ResourceProvider/Delete": + decorateSpanWithType(span, req.(*pulumirpc.DeleteRequest).Urn) + case "/pulumirpc.ResourceProvider/Invoke": + span.SetTag("pulumi-decorator", req.(*pulumirpc.InvokeRequest).Tok) + } +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go index 43b6ce0..cae1185 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go @@ -252,7 +252,7 @@ func (p *providerServer) Check(ctx context.Context, req *pulumirpc.CheckRequest) return nil, err } - newInputs, failures, err := p.provider.Check(urn, state, inputs, true) + newInputs, failures, err := p.provider.Check(urn, state, inputs, true, int(req.SequenceNumber)) if err != nil { return nil, err } 
@@ -409,13 +409,24 @@ func (p *providerServer) Construct(ctx context.Context, } cfg[configKey] = v } + + cfgSecretKeys := []config.Key{} + for _, k := range req.GetConfigSecretKeys() { + key, err := config.ParseKey(k) + if err != nil { + return nil, err + } + cfgSecretKeys = append(cfgSecretKeys, key) + } + info := ConstructInfo{ - Project: req.GetProject(), - Stack: req.GetStack(), - Config: cfg, - DryRun: req.GetDryRun(), - Parallel: int(req.GetParallel()), - MonitorAddress: req.GetMonitorEndpoint(), + Project: req.GetProject(), + Stack: req.GetStack(), + Config: cfg, + ConfigSecretKeys: cfgSecretKeys, + DryRun: req.GetDryRun(), + Parallel: int(req.GetParallel()), + MonitorAddress: req.GetMonitorEndpoint(), } aliases := make([]resource.URN, len(req.GetAliases())) @@ -526,3 +537,73 @@ func (p *providerServer) StreamInvoke(req *pulumirpc.InvokeRequest, return server.Send(&pulumirpc.InvokeResponse{Failures: rpcFailures}) } + +func (p *providerServer) Call(ctx context.Context, req *pulumirpc.CallRequest) (*pulumirpc.CallResponse, error) { + args, err := UnmarshalProperties(req.GetArgs(), p.unmarshalOptions("args")) + if err != nil { + return nil, err + } + + cfg := map[config.Key]string{} + for k, v := range req.GetConfig() { + configKey, err := config.ParseKey(k) + if err != nil { + return nil, err + } + cfg[configKey] = v + } + info := CallInfo{ + Project: req.GetProject(), + Stack: req.GetStack(), + Config: cfg, + DryRun: req.GetDryRun(), + Parallel: int(req.GetParallel()), + MonitorAddress: req.GetMonitorEndpoint(), + } + argDependencies := map[resource.PropertyKey][]resource.URN{} + for name, deps := range req.GetArgDependencies() { + urns := make([]resource.URN, len(deps.Urns)) + for i, urn := range deps.Urns { + urns[i] = resource.URN(urn) + } + argDependencies[resource.PropertyKey(name)] = urns + } + options := CallOptions{ + ArgDependencies: argDependencies, + } + + result, err := p.provider.Call(tokens.ModuleMember(req.GetTok()), args, info, options) + if 
err != nil { + return nil, err + } + + rpcResult, err := MarshalProperties(result.Return, MarshalOptions{ + Label: "result", + KeepUnknowns: true, + KeepSecrets: true, + KeepResources: true, + }) + if err != nil { + return nil, err + } + + returnDependencies := map[string]*pulumirpc.CallResponse_ReturnDependencies{} + for name, deps := range result.ReturnDependencies { + urns := make([]string, len(deps)) + for i, urn := range deps { + urns[i] = string(urn) + } + returnDependencies[string(name)] = &pulumirpc.CallResponse_ReturnDependencies{Urns: urns} + } + + rpcFailures := make([]*pulumirpc.CheckFailure, len(result.Failures)) + for i, f := range result.Failures { + rpcFailures[i] = &pulumirpc.CheckFailure{Property: string(f.Property), Reason: f.Reason} + } + + return &pulumirpc.CallResponse{ + Return: rpcResult, + ReturnDependencies: returnDependencies, + Failures: rpcFailures, + }, nil +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.mermaid b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.mermaid deleted file mode 100644 index aa54976..0000000 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.mermaid +++ /dev/null @@ -1,47 +0,0 @@ -%% regenerate using https://github.com/mermaid-js/mermaid-cli - -flowchart TD - start("Resource registration") - - read_read("Read(reg.URN, reg.ID, reg.State)") - - import_read("Read(reg.URN, reg.ID)") - import_check("Check(reg.URN, reg.Inputs, read.Inputs)") - import_diff("Diff(reg.URN, check.Inputs, read.State)") - - manage_check("Check(reg.URN, reg.Inputs, last?.Inputs)") - manage_create("Create(reg.URN, check.Inputs)") - manage_diff("Diff(reg.URN, check.Inputs, last.ID, last.State)") - manage_update("Update(reg.URN, last.ID, last.State)") - manage_replace_check("Check(reg.URN, reg.Inputs)") - manage_replace_create_before_delete("Create(reg.URN, reg.Inputs)") - 
manage_replace_delete_after_create("Delete(reg.URN, last.ID, last.State)") - manage_replace_delete_dependents("Delete dependents") - manage_replace_delete_before_create("Delete(reg.URN, last.ID, last.State)") - manage_replace_create_after_delete("Create(reg.URN, check.Inputs)") - - done("Return new inputs and state") - - start-- read -->read_read - start-- import -->import_read - start-- manage -->manage_check - - read_read-->done - - import_read-->import_check - import_check-->import_diff - import_diff-->done - - manage_check-- no last state -->manage_create - manage_check-- has last state -->manage_diff - manage_diff-- can be updated -->manage_update - manage_diff-- must be replaced -->manage_replace_check - manage_replace_check-- create before delete -->manage_replace_create_before_delete - manage_replace_check-- delete after craete -->manage_replace_delete_dependents - manage_replace_create_before_delete-->manage_replace_delete_after_create - manage_replace_delete_dependents-->manage_replace_delete_before_create - manage_replace_delete_before_create-->manage_replace_create_after_delete - manage_create-->done - manage_update-->done - manage_replace_delete_after_create-->done - manage_replace_create_after_delete-->done diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.svg b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.svg deleted file mode 100644 index 6f51f3d..0000000 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/resource_lifecycle.svg +++ /dev/null @@ -1 +0,0 @@ -
read
import
manage
no last state
has last state
can be updated
must be replaced
create before delete
delete after craete
Resource registration
Read(reg.URN, reg.ID, reg.State)
Read(reg.URN, reg.ID)
Check(reg.URN, reg.Inputs, read.Inputs)
Diff(reg.URN, check.Inputs, read.State)
Check(reg.URN, reg.Inputs, last?.Inputs)
Create(reg.URN, check.Inputs)
Diff(reg.URN, check.Inputs, last.ID, last.State)
Update(reg.URN, last.ID, last.State)
Check(reg.URN, reg.Inputs)
Create(reg.URN, reg.Inputs)
Delete(reg.URN, last.ID, last.State)
Delete dependents
Delete(reg.URN, last.ID, last.State)
Create(reg.URN, check.Inputs)
Return new inputs and state
\ No newline at end of file diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/rpc.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/rpc.go index 79c5ea4..51993ba 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/rpc.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/rpc.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,6 +15,7 @@ package plugin import ( + "fmt" "reflect" "sort" @@ -38,6 +39,7 @@ type MarshalOptions struct { RejectAssets bool // true if we should return errors on Asset and Archive values. KeepResources bool // true if we are keeping resoures (otherwise we return raw urn). SkipInternalKeys bool // true to skip internal property keys (keys that start with "__") in the resulting map. + KeepOutputValues bool // true if we are keeping output values. 
} const ( @@ -70,14 +72,12 @@ func MarshalProperties(props resource.PropertyMap, opts MarshalOptions) (*struct for _, key := range props.StableKeys() { v := props[key] logging.V(9).Infof("Marshaling property for RPC[%s]: %s=%v", opts.Label, key, v) - if v.IsOutput() { - logging.V(9).Infof("Skipping output property for RPC[%s]: %v", opts.Label, key) - } else if opts.SkipNulls && v.IsNull() { + if opts.SkipNulls && v.IsNull() { logging.V(9).Infof("Skipping null property for RPC[%s]: %s (as requested)", opts.Label, key) } else if opts.SkipInternalKeys && resource.IsInternalPropertyKey(key) { logging.V(9).Infof("Skipping internal property for RPC[%s]: %s (as requested)", opts.Label, key) } else { - m, err := MarshalPropertyValue(v, opts) + m, err := MarshalPropertyValue(key, v, opts) if err != nil { return nil, err } else if m != nil { @@ -91,7 +91,8 @@ func MarshalProperties(props resource.PropertyMap, opts MarshalOptions) (*struct } // MarshalPropertyValue marshals a single resource property value into its "JSON-like" value representation. 
-func MarshalPropertyValue(v resource.PropertyValue, opts MarshalOptions) (*structpb.Value, error) { +func MarshalPropertyValue(key resource.PropertyKey, v resource.PropertyValue, + opts MarshalOptions) (*structpb.Value, error) { if v.IsNull() { return MarshalNull(opts), nil } else if v.IsBool() { @@ -111,11 +112,13 @@ func MarshalPropertyValue(v resource.PropertyValue, opts MarshalOptions) (*struc } else if v.IsArray() { var elems []*structpb.Value for _, elem := range v.ArrayValue() { - e, err := MarshalPropertyValue(elem, opts) + e, err := MarshalPropertyValue(key, elem, opts) if err != nil { return nil, err } - elems = append(elems, e) + if e != nil { + elems = append(elems, e) + } } return &structpb.Value{ Kind: &structpb.Value_ListValue{ @@ -124,12 +127,12 @@ func MarshalPropertyValue(v resource.PropertyValue, opts MarshalOptions) (*struc }, nil } else if v.IsAsset() { if opts.RejectAssets { - return nil, errors.New("unexpected Asset property value") + return nil, fmt.Errorf("unexpected Asset property value for %q", key) } return MarshalAsset(v.AssetValue(), opts) } else if v.IsArchive() { if opts.RejectAssets { - return nil, errors.New("unexpected Asset Archive property value") + return nil, fmt.Errorf("unexpected Asset Archive property value for %q", key) } return MarshalArchive(v.ArchiveValue(), opts) } else if v.IsObject() { @@ -140,34 +143,57 @@ func MarshalPropertyValue(v resource.PropertyValue, opts MarshalOptions) (*struc return MarshalStruct(obj, opts), nil } else if v.IsComputed() { if opts.RejectUnknowns { - return nil, errors.New("unexpected unknown property value") + return nil, fmt.Errorf("unexpected unknown property value for %q", key) } else if opts.KeepUnknowns { return marshalUnknownProperty(v.Input().Element, opts), nil } return nil, nil // return nil and the caller will ignore it. } else if v.IsOutput() { - // Note that at the moment we don't differentiate between computed and output properties on the wire. 
As - // a result, they will show up as computed on the other end. This distinction isn't currently interesting. - if opts.KeepUnknowns { - return marshalUnknownProperty(v.OutputValue().Element, opts), nil + if !opts.KeepOutputValues { + result := v.OutputValue().Element + if !v.OutputValue().Known { + // Unknown outputs are marshaled the same as Computed. + result = resource.MakeComputed(resource.NewStringProperty("")) + } + if v.OutputValue().Secret { + result = resource.MakeSecret(result) + } + return MarshalPropertyValue(key, result, opts) } - return nil, nil // return nil and the caller will ignore it. + obj := resource.PropertyMap{ + resource.SigKey: resource.NewStringProperty(resource.OutputValueSig), + } + if v.OutputValue().Known { + obj["value"] = v.OutputValue().Element + } + if v.OutputValue().Secret { + obj["secret"] = resource.NewBoolProperty(v.OutputValue().Secret) + } + if len(v.OutputValue().Dependencies) > 0 { + deps := make([]resource.PropertyValue, len(v.OutputValue().Dependencies)) + for i, dep := range v.OutputValue().Dependencies { + deps[i] = resource.NewStringProperty(string(dep)) + } + obj["dependencies"] = resource.NewArrayProperty(deps) + } + output := resource.NewObjectProperty(obj) + return MarshalPropertyValue(key, output, opts) } else if v.IsSecret() { if !opts.KeepSecrets { logging.V(5).Infof("marshalling secret value as raw value as opts.KeepSecrets is false") - return MarshalPropertyValue(v.SecretValue().Element, opts) + return MarshalPropertyValue(key, v.SecretValue().Element, opts) } secret := resource.NewObjectProperty(resource.PropertyMap{ resource.SigKey: resource.NewStringProperty(resource.SecretSig), "value": v.SecretValue().Element, }) - return MarshalPropertyValue(secret, opts) + return MarshalPropertyValue(key, secret, opts) } else if v.IsResourceReference() { ref := v.ResourceReferenceValue() if !opts.KeepResources { val := string(ref.URN) if !ref.ID.IsNull() { - return MarshalPropertyValue(ref.ID, opts) + return 
MarshalPropertyValue(key, ref.ID, opts) } logging.V(5).Infof("marshalling resource value as raw URN or ID as opts.KeepResources is false") return MarshalString(val, opts), nil @@ -182,10 +208,11 @@ func MarshalPropertyValue(v resource.PropertyValue, opts MarshalOptions) (*struc if ref.PackageVersion != "" { m["packageVersion"] = resource.NewStringProperty(ref.PackageVersion) } - return MarshalPropertyValue(resource.NewObjectProperty(m), opts) + return MarshalPropertyValue(key, resource.NewObjectProperty(m), opts) } - contract.Failf("Unrecognized property value in RPC[%s]: %v (type=%v)", opts.Label, v.V, reflect.TypeOf(v.V)) + contract.Failf("Unrecognized property value in RPC[%s] for %q: %v (type=%v)", + opts.Label, key, v.V, reflect.TypeOf(v.V)) return nil, nil } @@ -240,7 +267,7 @@ func UnmarshalProperties(props *structpb.Struct, opts MarshalOptions) (resource. // And now unmarshal every field it into the map. for _, key := range keys { pk := resource.PropertyKey(key) - v, err := UnmarshalPropertyValue(props.Fields[key], opts) + v, err := UnmarshalPropertyValue(pk, props.Fields[key], opts) if err != nil { return nil, err } else if v != nil { @@ -259,7 +286,8 @@ func UnmarshalProperties(props *structpb.Struct, opts MarshalOptions) (resource. } // UnmarshalPropertyValue unmarshals a single "JSON-like" value into a new property value. 
-func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.PropertyValue, error) { +func UnmarshalPropertyValue(key resource.PropertyKey, v *structpb.Value, + opts MarshalOptions) (*resource.PropertyValue, error) { contract.Assert(v != nil) switch v.Kind.(type) { @@ -277,7 +305,7 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P s := v.GetStringValue() if unk, isunk := unmarshalUnknownPropertyValue(s, opts); isunk { if opts.RejectUnknowns { - return nil, errors.New("unexpected unknown property value") + return nil, fmt.Errorf("unexpected unknown property value for %q", key) } else if opts.KeepUnknowns { return &unk, nil } @@ -289,7 +317,7 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P lst := v.GetListValue() elems := make([]resource.PropertyValue, len(lst.GetValues())) for i, elem := range lst.GetValues() { - e, err := UnmarshalPropertyValue(elem, opts) + e, err := UnmarshalPropertyValue(key, elem, opts) if err != nil { return nil, err } else if e != nil { @@ -321,7 +349,7 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P switch sig { case resource.AssetSig: if opts.RejectAssets { - return nil, errors.New("unexpected Asset property value") + return nil, fmt.Errorf("unexpected Asset property value for %q", key) } asset, isasset, err := resource.DeserializeAsset(objmap) if err != nil { @@ -332,14 +360,14 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P contract.Assert(isasset) if opts.ComputeAssetHashes { if err = asset.EnsureHash(); err != nil { - return nil, errors.Wrapf(err, "failed to compute asset hash") + return nil, errors.Wrapf(err, "failed to compute asset hash for %q", key) } } m := resource.NewAssetProperty(asset) return &m, nil case resource.ArchiveSig: if opts.RejectAssets { - return nil, errors.New("unexpected Asset Archive property value") + return nil, fmt.Errorf("unexpected Asset Archive 
property value for %q", key) } archive, isarchive, err := resource.DeserializeArchive(objmap) if err != nil { @@ -350,7 +378,7 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P contract.Assert(isarchive) if opts.ComputeAssetHashes { if err = archive.EnsureHash(); err != nil { - return nil, errors.Wrapf(err, "failed to compute archive hash") + return nil, errors.Wrapf(err, "failed to compute archive hash for %q", key) } } m := resource.NewArchiveProperty(archive) @@ -358,21 +386,16 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P case resource.SecretSig: value, ok := obj["value"] if !ok { - return nil, errors.New("malformed RPC secret: missing value") + return nil, fmt.Errorf("malformed RPC secret: missing value for %q", key) } - if !opts.KeepSecrets { - logging.V(5).Infof("unmarshalling secret as raw value, as opts.KeepSecrets is false") - return &value, nil - } - s := resource.MakeSecret(value) - return &s, nil + return unmarshalSecretPropertyValue(value, opts), nil case resource.ResourceReferenceSig: urn, ok := obj["urn"] if !ok { - return nil, errors.New("malformed resource reference: missing urn") + return nil, fmt.Errorf("malformed resource reference for %q: missing urn", key) } if !urn.IsString() { - return nil, errors.New("malformed resource reference: urn not a string") + return nil, fmt.Errorf("malformed resource reference for %q: urn not a string", key) } id, hasID := "", false @@ -384,14 +407,14 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P case idProp.IsComputed(): // Leave the ID empty to indicate that it is unknown. 
default: - return nil, errors.New("malformed resource reference: id not a string") + return nil, fmt.Errorf("malformed resource reference for %q: id not a string", key) } } var packageVersion string if packageVersionProp, ok := obj["packageVersion"]; ok { if !packageVersionProp.IsString() { - return nil, errors.New("malformed resource reference: packageVersion not a string") + return nil, fmt.Errorf("malformed resource reference for %q: packageVersion not a string", key) } packageVersion = packageVersionProp.StringValue() } @@ -404,7 +427,7 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P v := structpb.Value{ Kind: &structpb.Value_StringValue{StringValue: UnknownStringValue}, } - return UnmarshalPropertyValue(&v, opts) + return UnmarshalPropertyValue(key, &v, opts) } value = id } @@ -419,12 +442,61 @@ func UnmarshalPropertyValue(v *structpb.Value, opts MarshalOptions) (*resource.P ref = resource.MakeComponentResourceReference(resource.URN(urn.StringValue()), packageVersion) } return &ref, nil + case resource.OutputValueSig: + value, known := obj["value"] + + var secret bool + if secretProp, ok := obj["secret"]; ok { + if !secretProp.IsBool() { + return nil, fmt.Errorf("malformed output value for %q: secret not a bool", key) + } + secret = secretProp.BoolValue() + } + + if !opts.KeepOutputValues { + result := &value + if !known { + result, err = UnmarshalPropertyValue(key, &structpb.Value{ + Kind: &structpb.Value_StringValue{StringValue: UnknownStringValue}, + }, opts) + if err != nil { + return nil, err + } + } + if secret && result != nil { + result = unmarshalSecretPropertyValue(*result, opts) + } + return result, nil + } + + var dependencies []resource.URN + if dependenciesProp, ok := obj["dependencies"]; ok { + if !dependenciesProp.IsArray() { + return nil, fmt.Errorf("malformed output value for %q: dependencies not an array", key) + } + dependencies = make([]resource.URN, len(dependenciesProp.ArrayValue())) + for i, dep := 
range dependenciesProp.ArrayValue() { + if !dep.IsString() { + return nil, fmt.Errorf( + "malformed output value for %q: element in dependencies not a string", key) + } + dependencies[i] = resource.URN(dep.StringValue()) + } + } + + output := resource.NewOutputProperty(resource.Output{ + Element: value, + Known: known, + Secret: secret, + Dependencies: dependencies, + }) + return &output, nil default: - return nil, errors.Errorf("unrecognized signature '%v' in property map", sig) + return nil, fmt.Errorf("unrecognized signature '%v' in property map for %q", sig, key) } default: - contract.Failf("Unrecognized structpb value kind in RPC[%s]: %v", opts.Label, reflect.TypeOf(v.Kind)) + contract.Failf("Unrecognized structpb value kind in RPC[%s] for %q: %v", opts.Label, key, reflect.TypeOf(v.Kind)) return nil, nil } } @@ -455,6 +527,15 @@ func unmarshalUnknownPropertyValue(s string, opts MarshalOptions) (resource.Prop return resource.PropertyValue{}, false } +func unmarshalSecretPropertyValue(v resource.PropertyValue, opts MarshalOptions) *resource.PropertyValue { + if !opts.KeepSecrets { + logging.V(5).Infof("unmarshalling secret as raw value, as opts.KeepSecrets is false") + return &v + } + s := resource.MakeSecret(v) + return &s +} + // MarshalNull marshals a nil to its protobuf form. func MarshalNull(opts MarshalOptions) *structpb.Value { return &structpb.Value{ @@ -500,7 +581,8 @@ func MarshalAsset(v *resource.Asset, opts MarshalOptions) (*structpb.Value, erro // To marshal an asset, we need to first serialize it, and then marshal that. sera := v.Serialize() serap := resource.NewPropertyMapFromMap(sera) - return MarshalPropertyValue(resource.NewObjectProperty(serap), opts) + pk := resource.PropertyKey(v.URI) + return MarshalPropertyValue(pk, resource.NewObjectProperty(serap), opts) } // MarshalArchive marshals an archive into its wire form for resource provider plugins. 
@@ -521,5 +603,6 @@ func MarshalArchive(v *resource.Archive, opts MarshalOptions) (*structpb.Value, // To marshal an archive, we need to first serialize it, and then marshal that. sera := v.Serialize() serap := resource.NewPropertyMapFromMap(sera) - return MarshalPropertyValue(resource.NewObjectProperty(serap), opts) + pk := resource.PropertyKey(v.URI) + return MarshalPropertyValue(pk, resource.NewObjectProperty(serap), opts) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties.go index 337870f..3556582 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -84,7 +84,10 @@ type Computed struct { // encountered, it means the resource has not yet been created, and so the output value is unavailable. Note that an // output property is a special case of computed, but carries additional semantic meaning. type Output struct { - Element PropertyValue // the eventual value (type) of the output property. + Element PropertyValue // the value of this output if it is resolved. + Known bool `json:"-"` // true if this output's value is known. + Secret bool `json:"-"` // true if this output's value is secret. + Dependencies []URN `json:"-"` // the dependencies associated with this output. } // Secret indicates that the underlying value should be persisted securely. @@ -105,7 +108,7 @@ type Secret struct { // - The ID may be unknown (in which case it will be the unknown property value) // - Otherwise, the ID must be a string. 
// -//nolint: golint +//nolint: revive type ResourceReference struct { URN URN ID PropertyValue @@ -364,12 +367,15 @@ func NewPropertyValueRepl(v interface{}, // HasValue returns true if a value is semantically meaningful. func (v PropertyValue) HasValue() bool { - return !v.IsNull() && !v.IsOutput() + if v.IsOutput() { + return v.OutputValue().Known + } + return !v.IsNull() } // ContainsUnknowns returns true if the property value contains at least one unknown (deeply). func (v PropertyValue) ContainsUnknowns() bool { - if v.IsComputed() || v.IsOutput() { + if v.IsComputed() || (v.IsOutput() && !v.OutputValue().Known) { return true } else if v.IsArray() { for _, e := range v.ArrayValue() { @@ -392,7 +398,7 @@ func (v PropertyValue) ContainsSecrets() bool { } else if v.IsComputed() { return v.Input().Element.ContainsSecrets() } else if v.IsOutput() { - return v.OutputValue().Element.ContainsSecrets() + return v.OutputValue().Secret || v.OutputValue().Element.ContainsSecrets() } else if v.IsArray() { for _, e := range v.ArrayValue() { if e.ContainsSecrets() { @@ -530,7 +536,12 @@ func (v PropertyValue) TypeString() string { } else if v.IsComputed() { return "output<" + v.Input().Element.TypeString() + ">" } else if v.IsOutput() { - return "output<" + v.OutputValue().Element.TypeString() + ">" + if !v.OutputValue().Known { + return MakeComputed(v.OutputValue().Element).TypeString() + } else if v.OutputValue().Secret { + return MakeSecret(v.OutputValue().Element).TypeString() + } + return v.OutputValue().Element.TypeString() } else if v.IsSecret() { return "secret<" + v.SecretValue().Element.TypeString() + ">" } else if v.IsResourceReference() { @@ -588,9 +599,16 @@ func (v PropertyValue) MapRepl(replk func(string) (string, bool), // String implements the fmt.Stringer interface to add slightly more information to the output. 
func (v PropertyValue) String() string { - if v.IsComputed() || v.IsOutput() { - // For computed and output properties, show their type followed by an empty object string. + if v.IsComputed() { + // For computed properties, show the type followed by an empty object string. return fmt.Sprintf("%v{}", v.TypeString()) + } else if v.IsOutput() { + if !v.OutputValue().Known { + return MakeComputed(v.OutputValue().Element).String() + } else if v.OutputValue().Secret { + return MakeSecret(v.OutputValue().Element).String() + } + return v.OutputValue().Element.String() } // For all others, just display the underlying property value. return fmt.Sprintf("{%v}", v.V) @@ -620,6 +638,9 @@ const SecretSig = "1b47061264138c4ac30d75fd1eb44270" // ResourceReferenceSig is the unique resource reference signature. const ResourceReferenceSig = "5cf8f73096256a8f31e491e813e4eb8e" +// OutputValueSig is the unique output value signature. +const OutputValueSig = "d0e6a833031e9bbcd3f4e8bde6ca49a4" + // IsInternalPropertyKey returns true if the given property key is an internal key that should not be displayed to // users. func IsInternalPropertyKey(key PropertyKey) bool { diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_diff.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_diff.go index 04a757e..b2cc5b0 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_diff.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_diff.go @@ -55,6 +55,12 @@ func (diff *ObjectDiff) Same(k PropertyKey) bool { return !diff.Changed(k) } +// Returns true if there are no changes (adds, deletes, updates) in the diff. Also returns true if +// diff is nil. Otherwise returns false. 
+func (diff *ObjectDiff) AnyChanges() bool { + return diff != nil && len(diff.Adds)+len(diff.Deletes)+len(diff.Updates) > 0 +} + // Keys returns a stable snapshot of all keys known to this object, across adds, deletes, sames, and updates. func (diff *ObjectDiff) Keys() []PropertyKey { var ks []PropertyKey @@ -74,6 +80,19 @@ func (diff *ObjectDiff) Keys() []PropertyKey { return ks } +// All keys where Changed(k) = true. +func (diff *ObjectDiff) ChangedKeys() []PropertyKey { + var ks []PropertyKey + if diff != nil { + for _, k := range diff.Keys() { + if diff.Changed(k) { + ks = append(ks, k) + } + } + } + return ks +} + // ValueDiff holds the results of diffing two property values. type ValueDiff struct { Old PropertyValue // the old value. @@ -342,6 +361,297 @@ func (v PropertyValue) DeepEquals(other PropertyValue) bool { return vid.DeepEquals(oid) } + // Outputs are equal if each of their fields is deeply equal. + if v.IsOutput() { + if !other.IsOutput() { + return false + } + vo := v.OutputValue() + oo := other.OutputValue() + + if vo.Known != oo.Known { + return false + } + if vo.Secret != oo.Secret { + return false + } + + // Note that the dependencies are assumed to be sorted. + if len(vo.Dependencies) != len(oo.Dependencies) { + return false + } + for i, dep := range vo.Dependencies { + if dep != oo.Dependencies[i] { + return false + } + } + + return vo.Element.DeepEquals(oo.Element) + } + + // For all other cases, primitives are equal if their values are equal. + return v.V == other.V +} + +// Diff returns a diffset by comparing the property map to another; it returns nil if there are no diffs. 
+func (props PropertyMap) DiffIncludeUnknowns(other PropertyMap, ignoreKeys ...IgnoreKeyFunc) *ObjectDiff { + adds := make(PropertyMap) + deletes := make(PropertyMap) + sames := make(PropertyMap) + updates := make(map[PropertyKey]ValueDiff) + + ignore := func(key PropertyKey) bool { + for _, ikf := range ignoreKeys { + if ikf(key) { + return true + } + } + return false + } + + // First find any updates or deletes. + for k, old := range props { + if ignore(k) { + continue + } + + if new, has := other[k]; has { + // If a new exists, use it; for output properties, however, ignore differences. + if new.IsOutput() { + sames[k] = new + } else if diff := old.DiffIncludeUnknowns(new, ignoreKeys...); diff != nil { + if !old.HasValue() { + adds[k] = new + } else if !new.HasValue() { + deletes[k] = old + } else { + updates[k] = *diff + } + } else { + sames[k] = new + } + } else { + if old.IsComputed() { + // The old property was it probably resolved to undefined so this isn't a diff, + // but it isn't really a same either... just don't add to the diff + } else if old.HasValue() { + // If there was no new property, it has been deleted. + deletes[k] = old + } + } + } + + // Next find any additions not in the old map. + for k, new := range other { + if ignore(k) { + continue + } + + if _, has := props[k]; !has && new.HasValue() { + adds[k] = new + } + } + + // If no diffs were found, return nil; else return a diff structure. + if len(adds) == 0 && len(deletes) == 0 && len(updates) == 0 { + return nil + } + return &ObjectDiff{ + Adds: adds, + Deletes: deletes, + Sames: sames, + Updates: updates, + } +} + +// Diff returns a diff by comparing a single property value to another; it returns nil if there are no diffs. 
+func (v PropertyValue) DiffIncludeUnknowns(other PropertyValue, ignoreKeys ...IgnoreKeyFunc) *ValueDiff { + if v.IsArray() && other.IsArray() { + old := v.ArrayValue() + new := other.ArrayValue() + // If any elements exist in the new array but not the old, track them as adds. + adds := make(map[int]PropertyValue) + for i := len(old); i < len(new); i++ { + adds[i] = new[i] + } + // If any elements exist in the old array but not the new, track them as adds. + deletes := make(map[int]PropertyValue) + for i := len(new); i < len(old); i++ { + deletes[i] = old[i] + } + // Now if elements exist in both, track them as sames or updates. + sames := make(map[int]PropertyValue) + updates := make(map[int]ValueDiff) + for i := 0; i < len(old) && i < len(new); i++ { + if diff := old[i].DiffIncludeUnknowns(new[i]); diff != nil { + updates[i] = *diff + } else { + sames[i] = new[i] + } + } + + if len(adds) == 0 && len(deletes) == 0 && len(updates) == 0 { + return nil + } + return &ValueDiff{ + Old: v, + New: other, + Array: &ArrayDiff{ + Adds: adds, + Deletes: deletes, + Sames: sames, + Updates: updates, + }, + } + } + if v.IsObject() && other.IsObject() { + old := v.ObjectValue() + new := other.ObjectValue() + if diff := old.DiffIncludeUnknowns(new, ignoreKeys...); diff != nil { + return &ValueDiff{ + Old: v, + New: other, + Object: diff, + } + } + return nil + } + + // If we got here, either the values are primitives, or they weren't the same type; do a simple diff. + if v.DeepEqualsIncludeUnknowns(other) { + return nil + } + return &ValueDiff{Old: v, New: other} +} + +func (props PropertyMap) DeepEqualsIncludeUnknowns(other PropertyMap) bool { + // If any in props either doesn't exist, or is of a different value, return false. 
+ for _, k := range props.StableKeys() { + v := props[k] + if p, has := other[k]; has { + if !v.DeepEqualsIncludeUnknowns(p) { + return false + } + } else if v.HasValue() && !v.IsComputed() { + return false + } + } + + // If the other map has properties that this map doesn't have, return false. + for _, k := range other.StableKeys() { + if _, has := props[k]; !has && other[k].HasValue() { + return false + } + } + + return true +} + +func (v PropertyValue) DeepEqualsIncludeUnknowns(other PropertyValue) bool { + // Anything is equal to a computed + if v.IsComputed() || other.IsComputed() { + return true + } + + // Arrays are equal if they are both of the same size and elements are deeply equal. + if v.IsArray() { + if !other.IsArray() { + return false + } + va := v.ArrayValue() + oa := other.ArrayValue() + if len(va) != len(oa) { + return false + } + for i, elem := range va { + if !elem.DeepEqualsIncludeUnknowns(oa[i]) { + return false + } + } + return true + } + + // Assets and archives enjoy value equality. + if v.IsAsset() { + if !other.IsAsset() { + return false + } + return v.AssetValue().Equals(other.AssetValue()) + } else if v.IsArchive() { + if !other.IsArchive() { + return false + } + return v.ArchiveValue().Equals(other.ArchiveValue()) + } + + // Object values are equal if their contents are deeply equal. + if v.IsObject() { + if !other.IsObject() { + return false + } + vo := v.ObjectValue() + oa := other.ObjectValue() + return vo.DeepEqualsIncludeUnknowns(oa) + } + + // Secret are equal if the value they wrap are equal. + if v.IsSecret() { + if !other.IsSecret() { + return false + } + vs := v.SecretValue() + os := other.SecretValue() + + return vs.Element.DeepEqualsIncludeUnknowns(os.Element) + } + + // Resource references are equal if they refer to the same resource. The package version is ignored. 
+ if v.IsResourceReference() { + if !other.IsResourceReference() { + return false + } + vr := v.ResourceReferenceValue() + or := other.ResourceReferenceValue() + + if vr.URN != or.URN { + return false + } + + vid, oid := vr.ID, or.ID + if vid.IsComputed() || oid.IsComputed() { + return true + } + return vid.DeepEqualsIncludeUnknowns(oid) + } + + // Outputs are equal if each of their fields is deeply equal. + if v.IsOutput() { + if !other.IsOutput() { + return false + } + vo := v.OutputValue() + oo := other.OutputValue() + + if vo.Known != oo.Known { + return false + } + if vo.Secret != oo.Secret { + return false + } + + // Note that the dependencies are assumed to be sorted. + if len(vo.Dependencies) != len(oo.Dependencies) { + return false + } + for i, dep := range vo.Dependencies { + if dep != oo.Dependencies[i] { + return false + } + } + + return vo.Element.DeepEqualsIncludeUnknowns(oo.Element) + } + // For all other cases, primitives are equal if their values are equal. return v.V == other.V } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_path.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_path.go index 10927fd..fe0e844 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_path.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/properties_path.go @@ -1,6 +1,8 @@ package resource import ( + "bytes" + "fmt" "strconv" "strings" @@ -41,6 +43,8 @@ type PropertyPath []interface{} // - root["key with a ."] // - ["root key with \"escaped\" quotes"].nested // - ["root key with a ."][100] +// - root.array[*].field +// - root.array["*"].field func ParsePropertyPath(path string) (PropertyPath, error) { // We interpret the grammar above a little loosely in order to keep things simple. 
Specifically, we will accept // something close to the following: @@ -82,11 +86,16 @@ func ParsePropertyPath(path string) (PropertyPath, error) { return nil, errors.New("missing closing bracket in array index") } - index, err := strconv.ParseInt(path[1:rbracket], 10, 0) - if err != nil { - return nil, errors.Wrap(err, "invalid array index") + segment := path[1:rbracket] + if segment == "*" { + pathElement, path = "*", path[rbracket:] + } else { + index, err := strconv.ParseInt(segment, 10, 0) + if err != nil { + return nil, errors.Wrap(err, "invalid array index") + } + pathElement, path = int(index), path[rbracket:] } - pathElement, path = int(index), path[rbracket:] } elements, path = append(elements, pathElement), path[1:] default: @@ -257,3 +266,73 @@ func (p PropertyPath) Delete(dest PropertyValue) bool { return true } + +// Contains returns true if the receiver property path contains the other property path. +// For example, the path `foo["bar"][1]` contains the path `foo.bar[1].baz`. The key `"*"` +// is a wildcard which matches any string or int index at that same nesting level. So for example, +// the path `foo.*.baz` contains `foo.bar.baz.bam`, and the path `*` contains any path. 
+func (p PropertyPath) Contains(other PropertyPath) bool { + if len(other) < len(p) { + return false + } + + for i := range p { + pp := p[i] + otherp := other[i] + + switch pp := pp.(type) { + case int: + if otherpi, ok := otherp.(int); !ok || otherpi != pp { + return false + } + case string: + if pp == "*" { + continue + } + if otherps, ok := otherp.(string); !ok || otherps != pp { + return false + } + default: + // Invalid path, return false + return false + } + } + + return true +} + +func requiresQuote(c rune) bool { + return !(c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z' || c >= '0' && c <= '9' || c == '_') +} + +func (p PropertyPath) String() string { + var buf bytes.Buffer + for i, k := range p { + switch k := k.(type) { + case string: + var keyBuf bytes.Buffer + quoted := false + for _, c := range k { + if requiresQuote(c) { + quoted = true + if c == '"' { + keyBuf.WriteByte('\\') + } + } + keyBuf.WriteRune(c) + } + if !quoted { + if i == 0 { + fmt.Fprintf(&buf, "%s", keyBuf.String()) + } else { + fmt.Fprintf(&buf, ".%s", keyBuf.String()) + } + } else { + fmt.Fprintf(&buf, `["%s"]`, keyBuf.String()) + } + case int: + fmt.Fprintf(&buf, "[%d]", k) + } + } + return buf.String() +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go index 73b00ba..a6614ee 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go @@ -32,18 +32,20 @@ type Goal struct { InitErrors []string // errors encountered as we attempted to initialize the resource. PropertyDependencies map[PropertyKey][]URN // the set of dependencies that affect each property. DeleteBeforeReplace *bool // true if this resource should be deleted prior to replacement. - IgnoreChanges []string // a list of property names to ignore during changes. 
+ IgnoreChanges []string // a list of property paths to ignore when diffing. AdditionalSecretOutputs []PropertyKey // outputs that should always be treated as secrets. Aliases []URN // additional URNs that should be aliased to this resource. ID ID // the expected ID of the resource, if any. CustomTimeouts CustomTimeouts // an optional config object for resource options + ReplaceOnChanges []string // a list of property paths that if changed should force a replacement. } // NewGoal allocates a new resource goal state. func NewGoal(t tokens.Type, name tokens.QName, custom bool, props PropertyMap, parent URN, protect bool, dependencies []URN, provider string, initErrors []string, propertyDependencies map[PropertyKey][]URN, deleteBeforeReplace *bool, ignoreChanges []string, - additionalSecretOutputs []PropertyKey, aliases []URN, id ID, customTimeouts *CustomTimeouts) *Goal { + additionalSecretOutputs []PropertyKey, aliases []URN, id ID, customTimeouts *CustomTimeouts, + replaceOnChanges []string) *Goal { g := &Goal{ Type: t, @@ -61,6 +63,7 @@ func NewGoal(t tokens.Type, name tokens.QName, custom bool, props PropertyMap, AdditionalSecretOutputs: additionalSecretOutputs, Aliases: aliases, ID: id, + ReplaceOnChanges: replaceOnChanges, } if customTimeouts != nil { diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_id.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_id.go index 30d3237..998011e 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_id.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_id.go @@ -15,7 +15,9 @@ package resource import ( + "crypto" cryptorand "crypto/rand" + "encoding/binary" "encoding/hex" "github.com/pkg/errors" @@ -71,7 +73,7 @@ func NewUniqueHex(prefix string, randlen, maxlen int) (string, error) { "name '%s' plus %d random chars is longer than maximum length %d", prefix, randlen, maxlen) } - bs := make([]byte, randlen+1/2) + bs := 
make([]byte, (randlen+1)/2) n, err := cryptorand.Read(bs) contract.AssertNoError(err) contract.Assert(n == len(bs)) @@ -86,3 +88,45 @@ func NewUniqueHexID(prefix string, randlen, maxlen int) (ID, error) { u, err := NewUniqueHex(prefix, randlen, maxlen) return ID(u), err } + +// NewUniqueHexV2 generates a new "random" hex string for use by resource providers. It will take the optional prefix +// and append randlen random characters (defaulting to 8 if not > 0). The result must not exceed maxlen total +// characters (if > 0). Note that capping to maxlen necessarily increases the risk of collisions. +// The randomness for this method is a function of urn and sequenceNumber iff sequenceNumber > 0, else it falls back to +// a non-deterministic source of randomness. +func NewUniqueHexV2(urn URN, sequenceNumber int, prefix string, randlen, maxlen int) (string, error) { + if randlen <= 0 { + randlen = 8 + } + if maxlen > 0 && len(prefix)+randlen > maxlen { + return "", errors.Errorf( + "name '%s' plus %d random chars is longer than maximum length %d", prefix, randlen, maxlen) + } + + if sequenceNumber == 0 { + // No sequence number; fall back to old logic. + return NewUniqueHex(prefix, randlen, maxlen) + } + + if randlen > 32 { + return "", errors.Errorf("randLen is longer than 32, %d", randlen) + } + + // TODO(seqnum) This is seeded by urn and sequence number, and urn has the stack and project names in it. + // But do we care about org name as well? + // Do we need a config source of randomness so if users hit a collision they can set a config value to get out of it? 
+ hasher := crypto.SHA512.New() + + _, err := hasher.Write([]byte(urn)) + contract.AssertNoError(err) + + bytes := make([]byte, 4) + binary.LittleEndian.PutUint32(bytes, uint32(sequenceNumber)) + _, err = hasher.Write(bytes) + contract.AssertNoError(err) + + bs := hasher.Sum(nil) + contract.Assert(len(bs) == 64) + + return prefix + hex.EncodeToString(bs)[:randlen], nil +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_state.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_state.go index 5ac3779..a57ed96 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_state.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_state.go @@ -33,16 +33,17 @@ type State struct { Outputs PropertyMap // the resource's complete output state (as returned by the resource provider). Parent URN // an optional parent URN that this resource belongs to. Protect bool // true to "protect" this resource (protected resources cannot be deleted). - External bool // true if this resource is "external" to Pulumi and we don't control the lifecycle - Dependencies []URN // the resource's dependencies + External bool // true if this resource is "external" to Pulumi and we don't control the lifecycle. + Dependencies []URN // the resource's dependencies. InitErrors []string // the set of errors encountered in the process of initializing resource. Provider string // the provider to use for this resource. PropertyDependencies map[PropertyKey][]URN // the set of dependencies that affect each property. PendingReplacement bool // true if this resource was deleted and is awaiting replacement. AdditionalSecretOutputs []PropertyKey // an additional set of outputs that should be treated as secrets. 
Aliases []URN // TODO - CustomTimeouts CustomTimeouts // A config block that will be used to configure timeouts for CRUD operations + CustomTimeouts CustomTimeouts // A config block that will be used to configure timeouts for CRUD operations. ImportID ID // the resource's import id, if this was an imported resource. + SequenceNumber int // an auto-incrementing sequence number for each time this resource gets created/replaced (0 means sequence numbers are unknown, -1 means the last replace didn't use a sequence number). } // NewState creates a new resource value from existing resource state information. @@ -51,7 +52,7 @@ func NewState(t tokens.Type, urn URN, custom bool, del bool, id ID, external bool, dependencies []URN, initErrors []string, provider string, propertyDependencies map[PropertyKey][]URN, pendingReplacement bool, additionalSecretOutputs []PropertyKey, aliases []URN, timeouts *CustomTimeouts, - importID ID) *State { + importID ID, sequenceNumber int) *State { contract.Assertf(t != "", "type was empty") contract.Assertf(custom || id == "", "is custom or had empty ID") @@ -76,6 +77,7 @@ func NewState(t tokens.Type, urn URN, custom bool, del bool, id ID, AdditionalSecretOutputs: additionalSecretOutputs, Aliases: aliases, ImportID: importID, + SequenceNumber: sequenceNumber, } if timeouts != nil { diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/names.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/names.go index 9d6137e..8c5159f 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/names.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/names.go @@ -21,7 +21,7 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" ) -// Name is an identifier. It conforms to the regex [A-Za-z_.][A-Za-z0-9_]*. +// Name is an identifier. It conforms to NameRegexpPattern. 
type Name string func (nm Name) String() string { return string(nm) } @@ -30,7 +30,13 @@ func (nm Name) String() string { return string(nm) } func (nm Name) Q() QName { return QName(nm) } var NameRegexp = regexp.MustCompile(NameRegexpPattern) -var NameRegexpPattern = "[A-Za-z_.][A-Za-z0-9_.]*" +var nameFirstCharRegexp = regexp.MustCompile("^" + nameFirstCharRegexpPattern + "$") +var nameRestCharRegexp = regexp.MustCompile("^" + nameRestCharRegexpPattern + "$") + +var NameRegexpPattern = nameFirstCharRegexpPattern + nameRestCharRegexpPattern + +const nameFirstCharRegexpPattern = "[A-Za-z_.]" +const nameRestCharRegexpPattern = `[A-Za-z0-9_.-]*` // IsName checks whether a string is a legal Name. func IsName(s string) bool { @@ -44,7 +50,7 @@ func AsName(s string) Name { } // QName is a qualified identifier. The "/" character optionally delimits different pieces of the name. Each element -// conforms to the Name regex [A-Za-z_.][A-Za-z0-9_.]*. For example, "pulumi/pulumi/stack". +// conforms to NameRegexpPattern. For example, "pulumi/pulumi/stack". type QName string func (nm QName) String() string { return string(nm) } @@ -60,6 +66,32 @@ func IsQName(s string) bool { return s != "" && QNameRegexp.FindString(s) == s } +// IntoQName converts an arbitrary string into a QName, converting the string to a valid QName if +// necessary. The conversion is deterministic, but also lossy. +func IntoQName(s string) QName { + output := []string{} + for _, s := range strings.Split(s, QNameDelimiter) { + if s == "" { + continue + } + segment := []byte(s) + if !nameFirstCharRegexp.Match([]byte{segment[0]}) { + segment[0] = '_' + } + for i := 1; i < len(s); i++ { + if !nameRestCharRegexp.Match([]byte{segment[i]}) { + segment[i] = '_' + } + } + output = append(output, string(segment)) + } + result := strings.Join(output, QNameDelimiter) + if result == "" { + result = "_" + } + return AsQName(result) +} + // AsQName converts a given string to a QName, asserting its validity. 
func AsQName(s string) QName { contract.Assertf(IsQName(s), "Expected string '%v' to be a name (%v)", s, QNameRegexpPattern) @@ -92,21 +124,11 @@ func (nm QName) Namespace() QName { return QName(qn) } -// PackageName is a qualified name referring to an imported package. It is similar to a QName, except that it permits -// dashes "-" as is commonplace with packages of various kinds. -type PackageName string +// PackageName is a qualified name referring to an imported package. +type PackageName QName func (nm PackageName) String() string { return string(nm) } -var PackageNameRegexp = regexp.MustCompile(PackageNameRegexpPattern) -var PackagePartRegexpPattern = "[A-Za-z_.][A-Za-z0-9_.-]*" -var PackageNameRegexpPattern = "(" + PackagePartRegexpPattern + "\\" + QNameDelimiter + ")*" + PackagePartRegexpPattern - -// IsPackageName checks whether a string is a legal Name. -func IsPackageName(s string) bool { - return s != "" && PackageNameRegexp.FindString(s) == s -} - // ModuleName is a qualified name referring to an imported module from a package. 
type ModuleName QName diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/tokens.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/tokens.go index d16bf7f..0eb0e2a 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/tokens.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/tokens/tokens.go @@ -131,7 +131,7 @@ func (tok Token) ModuleMember() ModuleMember { type Package Token func NewPackageToken(nm PackageName) Package { - contract.Assertf(IsPackageName(string(nm)), "Package name '%v' is not a legal qualified name", nm) + contract.Assertf(IsQName(string(nm)), "Package name '%v' is not a legal qualified name", nm) return Package(nm) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/buildkite.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/buildkite.go new file mode 100644 index 0000000..b73db61 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/buildkite.go @@ -0,0 +1,56 @@ +// Copyright 2016-2021, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ciutil + +import ( + "os" +) + +// buildkiteCI represents a Buildkite CI/CD system. +type buildkiteCI struct { + baseCI +} + +// DetectVars detects the env vars for a Buildkite Build. 
+func (bci buildkiteCI) DetectVars() Vars { + v := Vars{Name: Buildkite} + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-branch + v.BranchName = os.Getenv("BUILDKITE_BRANCH") + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-build-id + v.BuildID = os.Getenv("BUILDKITE_BUILD_ID") + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-build-number + v.BuildNumber = os.Getenv("BUILDKITE_BUILD_NUMBER") + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-build-url + v.BuildURL = os.Getenv("BUILDKITE_BUILD_URL") + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-message + // This is usually the commit message but can be other messages. + v.CommitMessage = os.Getenv("BUILDKITE_MESSAGE") + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-pull-request + // If Buildkite's PR env var it is a pull request of the supplied number, else the build type is + // whatever Buildkite says it is. Pull requests are webhooks just like a standard push so this allows + // us to differentiate the two. 
+ prNumber := os.Getenv("BUILDKITE_PULL_REQUEST") + if prNumber != "false" { + v.PRNumber = prNumber + v.BuildType = "PullRequest" + } else { + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-source + v.BuildType = os.Getenv("BUILDKITE_SOURCE") + } + // https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-commit + v.SHA = os.Getenv("BUILDKITE_COMMIT") + + return v +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/detect.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/detect.go index 95ed2d8..6fdfa2b 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/detect.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil/detect.go @@ -48,9 +48,11 @@ var detectors = map[SystemName]system{ EnvVarsToDetect: []string{"TF_BUILD"}, }, }, - Buildkite: baseCI{ - Name: Buildkite, - EnvVarsToDetect: []string{"BUILDKITE"}, + Buildkite: buildkiteCI{ + baseCI: baseCI{ + Name: Buildkite, + EnvVarsToDetect: []string{"BUILDKITE"}, + }, }, CircleCI: circleCICI{ baseCI: baseCI{ diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/child.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/child.go index 7fb4d95..06994f0 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/child.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/child.go @@ -11,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !windows // +build !windows package cmdutil diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/console.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/console.go index 7145f31..ff77c06 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/console.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/console.go @@ -15,13 +15,13 @@ package cmdutil import ( - "bufio" "fmt" "os" + "regexp" "runtime" - "strconv" "strings" + "github.com/rivo/uniseg" "golang.org/x/crypto/ssh/terminal" "github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil" @@ -69,13 +69,18 @@ func ReadConsole(prompt string) (string, error) { fmt.Print(prompt + ": ") } - reader := bufio.NewReader(os.Stdin) - raw, err := reader.ReadString('\n') - if err != nil { - return "", err + var raw strings.Builder + for { + var b [1]byte + if _, err := os.Stdin.Read(b[:]); err != nil { + return "", err + } + if b[0] == '\n' { + break + } + raw.WriteByte(b[0]) } - - return RemoveTrailingNewline(raw), nil + return RemoveTrailingNewline(raw.String()), nil } // IsTruthy returns true if the given string represents a CLI input interpreted as "true". @@ -122,12 +127,32 @@ type TableRow struct { // the max length of the items in each column. A default gap of two spaces is printed between each // column. func PrintTable(table Table) { - PrintTableWithGap(table, " ") + fmt.Print(table) } // PrintTableWithGap prints a grid of rows and columns. Width of columns is automatically determined // by the max length of the items in each column. A gap can be specified between the columns. func PrintTableWithGap(table Table, columnGap string) { + fmt.Print(table.ToStringWithGap(columnGap)) +} + +func (table Table) String() string { + return table.ToStringWithGap(" ") +} + +// 7-bit C1 ANSI sequences +var ansiEscape = regexp.MustCompile(`\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])`) + +// MeasureText returns the number of glyphs in a string. 
+// Importantly this also ignores ANSI escape sequences, so can be used to calculate layout of colorized strings. +func MeasureText(text string) int { + // Strip ansi escape sequences + clean := ansiEscape.ReplaceAllString(text, "") + // Need to count graphemes not runes or bytes + return uniseg.GraphemeClusterCount(clean) +} + +func (table *Table) ToStringWithGap(columnGap string) string { columnCount := len(table.Headers) // Figure out the preferred column width for each column. It will be set to the max length of @@ -149,38 +174,39 @@ func PrintTableWithGap(table Table, columnGap string) { } for columnIndex, val := range columns { - preferredColumnWidths[columnIndex] = max(preferredColumnWidths[columnIndex], len(val)) + preferredColumnWidths[columnIndex] = max(preferredColumnWidths[columnIndex], MeasureText(val)) } } - format := "" - for i, maxWidth := range preferredColumnWidths { - if i < len(preferredColumnWidths)-1 { - format += "%-" + strconv.Itoa(maxWidth+len(columnGap)) + "s" - } else { - // do not want whitespace appended to the last column. It would cause wrapping on lines - // that were not actually long if some other line was very long. - format += "%s" - } - } - format += "\n" - - columns := make([]interface{}, columnCount) + result := "" for _, row := range allRows { - for columnIndex, value := range row.Columns { - // Now, ensure we have the requested gap between columns as well. + result += table.Prefix + + for columnIndex, val := range row.Columns { + result += val + if columnIndex < columnCount-1 { - value += columnGap - } + // Work out how much whitespace we need to add to this string to bring it up to the + // preferredColumnWidth for this column. - columns[columnIndex] = value + maxWidth := preferredColumnWidths[columnIndex] + padding := maxWidth - MeasureText(val) + result += strings.Repeat(" ", padding) + + // Now, ensure we have the requested gap between columns as well. 
+ result += columnGap + } + // do not want whitespace appended to the last column. It would cause wrapping on lines + // that were not actually long if some other line was very long. } - fmt.Printf(table.Prefix+format, columns...) + result += "\n" + if row.AdditionalInfo != "" { - fmt.Print(row.AdditionalInfo) + result += row.AdditionalInfo } } + return result } func max(a, b int) int { diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go index 8545d2f..cb46887 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,10 +15,16 @@ package cmdutil import ( + "fmt" "io" + "io/ioutil" "log" + "net" "net/url" "os" + "runtime" + "strings" + "time" opentracing "github.com/opentracing/opentracing-go" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" @@ -55,14 +61,12 @@ func IsTracingEnabled() bool { // InitTracing initializes tracing func InitTracing(name, rootSpanName, tracingEndpoint string) { + // If no tracing endpoint was provided, just return. The default global tracer is already a no-op tracer. 
if tracingEndpoint == "" { return } - // Store the tracing endpoint - TracingEndpoint = tracingEndpoint - endpointURL, err := url.Parse(tracingEndpoint) if err != nil { log.Fatalf("invalid tracing endpoint: %v", err) @@ -90,12 +94,33 @@ func InitTracing(name, rootSpanName, tracingEndpoint string) { collector := appdash.NewLocalCollector(store.store) tracer = appdash_opentracing.NewTracer(collector) + + proxyEndpoint, err := startProxyAppDashServer(collector) + if err != nil { + log.Fatal(err) + } + + // Instead of storing the original endpoint, store the + // proxy endpoint. The TracingEndpoint global var is + // consumed by code forking off sub-processes, and we + // want those sending data to the proxy endpoint, so + // it cleanly lands in the file managed by the parent + // process. + TracingEndpoint = proxyEndpoint + case endpointURL.Scheme == "tcp": + // Store the tracing endpoint + TracingEndpoint = tracingEndpoint + // If the endpoint scheme is tcp, use an Appdash endpoint. - collector := appdash.NewRemoteCollector(tracingEndpoint) + collector := appdash.NewRemoteCollector(endpointURL.Host) traceCloser = collector tracer = appdash_opentracing.NewTracer(collector) + default: + // Store the tracing endpoint + TracingEndpoint = tracingEndpoint + // Jaeger tracer can be initialized with a transport that will // report tracing Spans to a Zipkin backend transport, err := zipkin.NewHTTPTransport( @@ -121,7 +146,12 @@ func InitTracing(name, rootSpanName, tracingEndpoint string) { // If a root span was requested, start it now. if rootSpanName != "" { - TracingRootSpan = tracer.StartSpan(rootSpanName) + var options []opentracing.StartSpanOption + for _, tag := range rootSpanTags() { + options = append(options, tag) + } + TracingRootSpan = tracer.StartSpan(rootSpanName, options...) 
+ go collectMemStats(rootSpanName) } } @@ -137,3 +167,154 @@ func CloseTracing() { contract.IgnoreClose(traceCloser) } + +// Starts an AppDash server listening on any available TCP port +// locally and sends the spans and annotations to the given collector. +// Returns a Pulumi-formatted tracing endpoint pointing to this +// server. +// +// See https://github.com/sourcegraph/appdash/blob/master/cmd/appdash/example_app.go +func startProxyAppDashServer(collector appdash.Collector) (string, error) { + l, err := net.ListenTCP("tcp", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}) + if err != nil { + return "", err + } + collectorPort := l.Addr().(*net.TCPAddr).Port + + cs := appdash.NewServer(l, collector) + cs.Debug = true + cs.Trace = true + go cs.Start() + + // The default sends to stderr, which is unfortunate for + // end-users. Discard for now. + cs.Log = log.New(ioutil.Discard, "appdash", 0) + + return fmt.Sprintf("tcp://127.0.0.1:%d", collectorPort), nil +} + +// Computes initial tags to write to the `TracingRootSpan`, which can +// be useful for aggregating trace data in benchmarks. +func rootSpanTags() []opentracing.Tag { + + tags := []opentracing.Tag{ + { + Key: "os.Args", + Value: os.Args, + }, + { + Key: "runtime.GOOS", + Value: runtime.GOOS, + }, + { + Key: "runtime.GOARCH", + Value: runtime.GOARCH, + }, + { + Key: "runtime.NumCPU", + Value: runtime.NumCPU(), + }, + } + + // Promote all env vars `pulumi_tracing_tag_foo=bar` into tags `foo: bar`. + envPrefix := "pulumi_tracing_tag_" + for _, e := range os.Environ() { + pair := strings.SplitN(e, "=", 2) + envVarName := strings.ToLower(pair[0]) + envVarValue := pair[1] + + if strings.HasPrefix(envVarName, envPrefix) { + tags = append(tags, opentracing.Tag{ + Key: strings.TrimPrefix(envVarName, envPrefix), + Value: envVarValue, + }) + } + } + + return tags +} + +// Samples memory stats in the background at 1s intervals, and creates +// spans for the data. 
This is currently opt-in via +// `PULUMI_TRACING_MEMSTATS_POLL_INTERVAL=1s` or similar. Consider +// collecting this by default later whenever tracing is enabled as we +// calibrate that the overhead is low enough. +func collectMemStats(spanPrefix string) { + memStats := runtime.MemStats{} + maxStats := runtime.MemStats{} + + poll := func() { + if TracingRootSpan == nil { + return + } + + runtime.ReadMemStats(&memStats) + + // report cumulative metrics as is + TracingRootSpan.SetTag("runtime.NumCgoCall", runtime.NumCgoCall()) + TracingRootSpan.SetTag("MemStats.TotalAlloc", memStats.TotalAlloc) + TracingRootSpan.SetTag("MemStats.Mallocs", memStats.Mallocs) + TracingRootSpan.SetTag("MemStats.Frees", memStats.Frees) + TracingRootSpan.SetTag("MemStats.PauseTotalNs", memStats.PauseTotalNs) + TracingRootSpan.SetTag("MemStats.NumGC", memStats.NumGC) + + // for other metrics report the max + + if memStats.Sys > maxStats.Sys { + maxStats.Sys = memStats.Sys + TracingRootSpan.SetTag("MemStats.Sys.Max", maxStats.Sys) + } + + if memStats.HeapAlloc > maxStats.HeapAlloc { + maxStats.HeapAlloc = memStats.HeapAlloc + TracingRootSpan.SetTag("MemStats.HeapAlloc.Max", maxStats.HeapAlloc) + } + + if memStats.HeapSys > maxStats.HeapSys { + maxStats.HeapSys = memStats.HeapSys + TracingRootSpan.SetTag("MemStats.HeapSys.Max", maxStats.HeapSys) + } + + if memStats.HeapIdle > maxStats.HeapIdle { + maxStats.HeapIdle = memStats.HeapIdle + TracingRootSpan.SetTag("MemStats.HeapIdle.Max", maxStats.HeapIdle) + } + + if memStats.HeapInuse > maxStats.HeapInuse { + maxStats.HeapInuse = memStats.HeapInuse + TracingRootSpan.SetTag("MemStats.HeapInuse.Max", maxStats.HeapInuse) + } + + if memStats.HeapReleased > maxStats.HeapReleased { + maxStats.HeapReleased = memStats.HeapReleased + TracingRootSpan.SetTag("MemStats.HeapReleased.Max", maxStats.HeapReleased) + } + + if memStats.HeapObjects > maxStats.HeapObjects { + maxStats.HeapObjects = memStats.HeapObjects + 
TracingRootSpan.SetTag("MemStats.HeapObjects.Max", maxStats.HeapObjects) + } + + if memStats.StackInuse > maxStats.StackInuse { + maxStats.StackInuse = memStats.StackInuse + TracingRootSpan.SetTag("MemStats.StackInuse.Max", maxStats.StackInuse) + } + + if memStats.StackSys > maxStats.StackSys { + maxStats.StackSys = memStats.StackSys + TracingRootSpan.SetTag("MemStats.StackSys.Max", maxStats.StackSys) + } + } + + interval := os.Getenv("PULUMI_TRACING_MEMSTATS_POLL_INTERVAL") + + if interval != "" { + intervalDuration, err := time.ParseDuration(interval) + if err == nil { + for { + poll() + time.Sleep(intervalDuration) + } + } + } +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil/lock.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil/lock.go index 91054f0..2164722 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil/lock.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil/lock.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,23 +17,22 @@ package fsutil import ( "sync" - "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" - - "github.com/gofrs/flock" + "github.com/rogpeppe/go-internal/lockedfile" ) // FileMutex is a mutex that serializes both within and across processes. When acquired, it can be assumed that the // caller holds exclusive access over te protected resources, even if there are other consumers both within and outside // of the same process. 
type FileMutex struct { - proclock sync.Mutex // lock serializing in-process access to the protected resource - fslock *flock.Flock // lock serializing out-of-process access to the protected resource + proclock sync.Mutex // lock serializing in-process access to the protected resource + fslock *lockedfile.Mutex // lock serializing out-of-process access to the protected resource + fsunlock func() } // NewFileMutex creates a new FileMutex using the given file as a file lock. func NewFileMutex(path string) *FileMutex { return &FileMutex{ - fslock: flock.New(path), + fslock: lockedfile.MutexAt(path), } } @@ -48,12 +47,13 @@ func NewFileMutex(path string) *FileMutex { // calling goroutine completely owns the resource. func (fm *FileMutex) Lock() error { fm.proclock.Lock() - if err := fm.fslock.Lock(); err != nil { + fsunlock, err := fm.fslock.Lock() + if err != nil { fm.proclock.Unlock() return err } + fm.fsunlock = fsunlock - contract.Assert(fm.fslock.Locked()) return nil } @@ -61,9 +61,9 @@ func (fm *FileMutex) Lock() error { // after which it unlocks the proc lock. Unlocking the file lock first ensures that it is not possible for two // goroutines to lock or unlock the file mutex without first holding the proc lock. func (fm *FileMutex) Unlock() error { - if err := fm.fslock.Unlock(); err != nil { - fm.proclock.Unlock() - return err + if fm.fsunlock != nil { + fm.fsunlock() + fm.fsunlock = nil } fm.proclock.Unlock() diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/gitutil/git.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/gitutil/git.go index 888a301..30b043d 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/gitutil/git.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/gitutil/git.go @@ -53,7 +53,7 @@ const ( // be sure to update its usage elsewhere in the code as well. // The nolint instruction prevents gometalinter from complaining about the length of the line. 
var ( - cloudSourceControlSSHRegex = regexp.MustCompile(`git@(?P[a-zA-Z]*\.com|[a-zA-Z]*\.org):(?P.*)`) //nolint + cloudSourceControlSSHRegex = regexp.MustCompile(`git@(?P[a-zA-Z.-]*\.com|[a-zA-Z.-]*\.org):(?P[^/]+/[^/]+\.git).?$`) //nolint azureSourceControlSSHRegex = regexp.MustCompile(`git@([a-zA-Z]+\.)?(?P([a-zA-Z]+\.)*[a-zA-Z]*\.com):(v[0-9]{1}/)?(?P.*)`) //nolint legacyAzureSourceControlRegex = regexp.MustCompile("(?P[a-zA-Z0-9-]*).visualstudio.com$") ) @@ -296,6 +296,35 @@ func GitCloneOrPull(url string, referenceName plumbing.ReferenceName, path strin return nil } +// We currently accept Gist URLs in the form: https://gist.github.com/owner/id. +// We may want to consider supporting https://gist.github.com/id at some point, +// as well as arbitrary revisions, e.g. https://gist.github.com/owner/id/commit. +func parseGistURL(u *url.URL) (string, error) { + path := strings.Trim(u.Path, "/") + paths := strings.Split(path, "/") + if len(paths) != 2 { + return "", errors.New("invalid Gist URL") + } + + owner := paths[0] + if owner == "" { + return "", errors.New("invalid Gist URL; no owner") + } + + id := paths[1] + if id == "" { + return "", errors.New("invalid Gist URL; no id") + } + + if !strings.HasSuffix(id, ".git") { + id = id + ".git" + } + + resultURL := u.Scheme + "://" + u.Host + "/" + id + return resultURL, nil + +} + // ParseGitRepoURL returns the URL to the Git repository and path from a raw URL. // For example, an input of "https://github.com/pulumi/templates/templates/javascript" returns // "https://github.com/pulumi/templates.git" and "templates/javascript". @@ -309,37 +338,16 @@ func ParseGitRepoURL(rawurl string) (string, string, error) { return "", "", errors.New("invalid URL scheme") } - path := strings.TrimPrefix(u.Path, "/") - // Special case Gists. if u.Hostname() == "gist.github.com" { - // We currently accept Gist URLs in the form: https://gist.github.com/owner/id. 
- // We may want to consider supporting https://gist.github.com/id at some point, - // as well as arbitrary revisions, e.g. https://gist.github.com/owner/id/commit. - path = strings.TrimSuffix(path, "/") - paths := strings.Split(path, "/") - if len(paths) != 2 { - return "", "", errors.New("invalid Gist URL") - } - - owner := paths[0] - if owner == "" { - return "", "", errors.New("invalid Gist URL; no owner") - } - - id := paths[1] - if id == "" { - return "", "", errors.New("invalid Gist URL; no id") - } - - if !strings.HasSuffix(id, ".git") { - id = id + ".git" + repo, err := parseGistURL(u) + if err != nil { + return "", "", err } - - resultURL := u.Scheme + "://" + u.Host + "/" + id - return resultURL, "", nil + return repo, "", nil } + path := strings.TrimPrefix(u.Path, "/") paths := strings.Split(path, "/") if len(paths) < 2 { return "", "", errors.New("invalid Git URL") @@ -361,6 +369,7 @@ func ParseGitRepoURL(rawurl string) (string, string, error) { resultURL := u.Scheme + "://" + u.Host + "/" + owner + "/" + repo resultPath := strings.TrimSuffix(strings.Join(paths[2:], "/"), "/") + return resultURL, resultPath, nil } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/httputil/http.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/httputil/http.go index be29a2a..fde493b 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/httputil/http.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/httputil/http.go @@ -23,11 +23,30 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/util/retry" ) -// maxRetryCount is the number of times to try an http request before giving up an returning the last error -const maxRetryCount = 5 +// RetryOpts defines options to configure the retry behavior. +// Leave nil for defaults. +type RetryOpts struct { + // These fields map directly to util.Acceptor. 
+ Delay *time.Duration + Backoff *float64 + MaxDelay *time.Duration + + MaxRetryCount *int +} // DoWithRetry calls client.Do, and in the case of an error, retries the operation again after a slight delay. +// Uses the default retry delays, starting at 100ms and ramping up to ~1.3s. func DoWithRetry(req *http.Request, client *http.Client) (*http.Response, error) { + var opts RetryOpts + return doWithRetry(req, client, opts) +} + +// DoWithRetryOpts calls client.Do, but retrying 500s (even for POSTs). Using the provided delays. +func DoWithRetryOpts(req *http.Request, client *http.Client, opts RetryOpts) (*http.Response, error) { + return doWithRetry(req, client, opts) +} + +func doWithRetry(req *http.Request, client *http.Client, opts RetryOpts) (*http.Response, error) { contract.Assertf(req.ContentLength == 0 || req.GetBody != nil, "Retryable request must have no body or rewindable body") @@ -35,8 +54,20 @@ func DoWithRetry(req *http.Request, client *http.Client) (*http.Response, error) return lower <= test && test <= upper } - _, res, err := retry.Until(context.Background(), retry.Acceptor{ - Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) { + // maxRetryCount is the number of times to try an http request before + // giving up and returning the last error. + maxRetryCount := 5 + if opts.MaxRetryCount != nil { + maxRetryCount = *opts.MaxRetryCount + } + + acceptor := retry.Acceptor{ + // If the opts field is nil, retry.Until will provide defaults. + Delay: opts.Delay, + Backoff: opts.Backoff, + MaxDelay: opts.MaxDelay, + + Accept: func(try int, _ time.Duration) (bool, interface{}, error) { if try > 0 && req.GetBody != nil { // Reset request body, if present, for retries. 
rc, bodyErr := req.GetBody() @@ -60,7 +91,8 @@ func DoWithRetry(req *http.Request, client *http.Client) (*http.Response, error) } return false, nil, nil }, - }) + } + _, res, err := retry.Until(context.Background(), acceptor) if err != nil { return nil, err diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/logging/log.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/logging/log.go index ac67192..b949400 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/logging/log.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/logging/log.go @@ -63,6 +63,13 @@ func Flush() { glog.Flush() } +func maybeSetFlag(name, value string) { + if f := flag.Lookup(name); f != nil { + err := f.Value.Set(value) + assertNoError(err) + } +} + // InitLogging ensures the logging library has been initialized with the given settings. func InitLogging(logToStderr bool, verbose int, logFlow bool) { // Remember the settings in case someone inquires. @@ -78,12 +85,10 @@ func InitLogging(logToStderr bool, verbose int, logFlow bool) { assertNoError(err) } if logToStderr { - err := flag.Lookup("logtostderr").Value.Set("true") - assertNoError(err) + maybeSetFlag("logtostderr", "true") } if verbose > 0 { - err := flag.Lookup("v").Value.Set(strconv.Itoa(verbose)) - assertNoError(err) + maybeSetFlag("v", strconv.Itoa(verbose)) } } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/result/result.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/result/result.go index 5998399..447bc68 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/result/result.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/result/result.go @@ -15,6 +15,8 @@ package result import ( + "fmt" + "github.com/hashicorp/go-multierror" "github.com/pkg/errors" ) @@ -51,6 +53,18 @@ type simpleResult struct { func (r *simpleResult) Error() error { return r.err } func (r *simpleResult) IsBail() bool { return r.err == nil } +func (r 
*simpleResult) String() string { + if r.err == nil { + return "Bail" + } + return fmt.Sprintf("Error: %s", r.err) +} +func (r *simpleResult) GoString() string { + if r.err == nil { + return "&simpleResult{}" + } + return fmt.Sprintf("&simpleResult{err: %#v}", r.err) +} // Bail produces a Result that represents a computation that failed to complete // successfully but is not a bug in Pulumi. diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/interceptor.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/interceptor.go index 1739418..8087447 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/interceptor.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/interceptor.go @@ -55,13 +55,11 @@ func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) er // OpenTracingServerInterceptor provides a default gRPC server interceptor for emitting tracing to the global // OpenTracing tracer. -func OpenTracingServerInterceptor(parentSpan opentracing.Span) grpc.UnaryServerInterceptor { - tracingInterceptor := otgrpc.OpenTracingServerInterceptor( - // Use the globally installed tracer - opentracing.GlobalTracer(), - // Log full payloads along with trace spans - otgrpc.LogPayloads(), - ) +func OpenTracingServerInterceptor(parentSpan opentracing.Span, options ...otgrpc.Option) grpc.UnaryServerInterceptor { + // Log full payloads along with trace spans + options = append(options, otgrpc.LogPayloads()) + + tracingInterceptor := otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer(), options...) if parentSpan == nil { return tracingInterceptor } @@ -80,19 +78,17 @@ func OpenTracingServerInterceptor(parentSpan opentracing.Span) grpc.UnaryServerI } return tracingInterceptor(ctx, req, info, handler) } - } // OpenTracingClientInterceptor provides a default gRPC client interceptor for emitting tracing to the global // OpenTracing tracer. 
-func OpenTracingClientInterceptor() grpc.UnaryClientInterceptor { - return otgrpc.OpenTracingClientInterceptor( - // Use the globally installed tracer - opentracing.GlobalTracer(), +func OpenTracingClientInterceptor(options ...otgrpc.Option) grpc.UnaryClientInterceptor { + options = append(options, // Log full payloads along with trace spans otgrpc.LogPayloads(), // Do not trace calls to the empty method otgrpc.IncludingSpans(func(_ opentracing.SpanContext, method string, _, _ interface{}) bool { return method != "" })) + return otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer(), options...) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/serve.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/serve.go index 392919e..a65e4f3 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/serve.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/serve.go @@ -19,6 +19,7 @@ import ( "strconv" "strings" + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" opentracing "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "google.golang.org/grpc" @@ -47,7 +48,7 @@ func IsBenignCloseErr(err error) bool { // eventually return an error, and an error, in case something went wrong. The channel is non-nil and waits until // the server is finished, in the case of a successful launch of the RPC server. func Serve(port int, cancel chan bool, registers []func(*grpc.Server) error, - parentSpan opentracing.Span) (int, chan error, error) { + parentSpan opentracing.Span, options ...otgrpc.Option) (int, chan error, error) { // Listen on a TCP port, but let the kernel choose a free port for us. lis, err := net.Listen("tcp", "127.0.0.1:"+strconv.Itoa(port)) @@ -57,7 +58,7 @@ func Serve(port int, cancel chan bool, registers []func(*grpc.Server) error, // Now new up a gRPC server and register any RPC interfaces the caller wants. 
srv := grpc.NewServer( - grpc.UnaryInterceptor(OpenTracingServerInterceptor(parentSpan)), + grpc.UnaryInterceptor(OpenTracingServerInterceptor(parentSpan, options...)), grpc.MaxRecvMsgSize(maxRPCMessageSize), ) for _, register := range registers { diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/creds.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/creds.go index 878e99b..d5e9824 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/creds.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/creds.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,6 +15,7 @@ package workspace import ( + "bytes" "encoding/json" "io/ioutil" "os" @@ -22,6 +23,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/rogpeppe/go-internal/lockedfile" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging" @@ -30,6 +32,7 @@ import ( // PulumiCredentialsPathEnvVar is a path to the folder where credentials are stored. 
// We use this in testing so that tests which log in and out do not impact the local developer's // credentials or tests interacting with one another +//nolint: gosec const PulumiCredentialsPathEnvVar = "PULUMI_CREDENTIALS_PATH" // PulumiBackendURLEnvVar is an environment variable which can be used to set the backend that will be @@ -183,7 +186,7 @@ func GetStoredCredentials() (Credentials, error) { return Credentials{}, err } - c, err := ioutil.ReadFile(credsFile) + c, err := lockedfile.Read(credsFile) if err != nil { if os.IsNotExist(err) { return Credentials{}, nil @@ -228,25 +231,129 @@ func StoreCredentials(creds Credentials) error { return errors.Wrapf(err, "marshalling credentials object") } + if err := lockedfile.Write(credsFile, bytes.NewReader(raw), 0600); err != nil { + return err + } + + return nil +} + +type BackendConfig struct { + DefaultOrg string `json:"defaultOrg,omitempty"` // The default org for this backend config. +} + +type PulumiConfig struct { + BackendConfig map[string]BackendConfig `json:"backends,omitempty"` // a map of arbitrary backends configs. 
+} + +func getConfigFilePath() (string, error) { + // Allow the folder we use to store config in to be overridden by tests + pulumiFolder := os.Getenv(PulumiCredentialsPathEnvVar) + if pulumiFolder == "" { + folder, err := GetPulumiHomeDir() + if err != nil { + return "", errors.Wrapf(err, "failed to get the home path") + } + pulumiFolder = folder + } + + err := os.MkdirAll(pulumiFolder, 0700) + if err != nil { + return "", errors.Wrapf(err, "failed to create '%s'", pulumiFolder) + } + + return filepath.Join(pulumiFolder, "config.json"), nil +} + +func GetPulumiConfig() (PulumiConfig, error) { + configFile, err := getConfigFilePath() + if err != nil { + return PulumiConfig{}, err + } + + c, err := ioutil.ReadFile(configFile) + if err != nil { + if os.IsNotExist(err) { + return PulumiConfig{}, nil + } + return PulumiConfig{}, errors.Wrapf(err, "reading '%s'", configFile) + } + + var config PulumiConfig + if err = json.Unmarshal(c, &config); err != nil { + return PulumiConfig{}, errors.Wrapf(err, "failed to read Pulumi config file") + } + + return config, nil +} + +func StorePulumiConfig(config PulumiConfig) error { + configFile, err := getConfigFilePath() + if err != nil { + return err + } + + raw, err := json.MarshalIndent(config, "", " ") + if err != nil { + return errors.Wrapf(err, "marshalling config object") + } + // Use a temporary file and atomic os.Rename to ensure the file contents are // updated atomically to ensure concurrent `pulumi` CLI operations are safe. 
- tempCredsFile, err := ioutil.TempFile(filepath.Dir(credsFile), "credentials-*.json") + tempConfigFile, err := ioutil.TempFile(filepath.Dir(configFile), "config-*.json") if err != nil { return err } - _, err = tempCredsFile.Write(raw) + _, err = tempConfigFile.Write(raw) if err != nil { return err } - err = tempCredsFile.Close() + err = tempConfigFile.Close() if err != nil { return err } - err = os.Rename(tempCredsFile.Name(), credsFile) + err = os.Rename(tempConfigFile.Name(), configFile) if err != nil { - contract.IgnoreError(os.Remove(tempCredsFile.Name())) + contract.IgnoreError(os.Remove(tempConfigFile.Name())) return err } return nil } + +func SetBackendConfigDefaultOrg(backendURL, defaultOrg string) error { + config, err := GetPulumiConfig() + if err != nil && !os.IsNotExist(err) { + return err + } + + if config.BackendConfig == nil { + config.BackendConfig = make(map[string]BackendConfig) + } + + config.BackendConfig[backendURL] = BackendConfig{ + DefaultOrg: defaultOrg, + } + + return StorePulumiConfig(config) +} + +func GetBackendConfigDefaultOrg() (string, error) { + config, err := GetPulumiConfig() + if err != nil && !os.IsNotExist(err) { + return "", err + } + + backendURL, err := GetCurrentCloudURL() + if err != nil { + return "", err + } + + if beConfig, ok := config.BackendConfig[backendURL]; ok { + if beConfig.DefaultOrg != "" { + return beConfig.DefaultOrg, nil + } + } + + return "", nil +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/loaders.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/loaders.go index aba23ae..5649c43 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/loaders.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/loaders.go @@ -25,25 +25,25 @@ import ( // projectSingleton is a singleton instance of projectLoader, which controls a global map of instances of Project // configs (one per path). 
-var projectSingleton *projectLoader = &projectLoader{ +var projectSingleton = &projectLoader{ internal: map[string]*Project{}, } // projectStackSingleton is a singleton instance of projectStackLoader, which controls a global map of instances of // ProjectStack configs (one per path). -var projectStackSingleton *projectStackLoader = &projectStackLoader{ +var projectStackSingleton = &projectStackLoader{ internal: map[string]*ProjectStack{}, } // pluginProjectSingleton is a singleton instance of pluginProjectLoader, which controls a global map of instances of // PluginProject configs (one per path). -var pluginProjectSingleton *pluginProjectLoader = &pluginProjectLoader{ +var pluginProjectSingleton = &pluginProjectLoader{ internal: map[string]*PluginProject{}, } // policyPackProjectSingleton is a singleton instance of policyPackProjectLoader, which controls a global map of // instances of PolicyPackProject configs (one per path). -var policyPackProjectSingleton *policyPackProjectLoader = &policyPackProjectLoader{ +var policyPackProjectSingleton = &policyPackProjectLoader{ internal: map[string]*PolicyPackProject{}, } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/paths.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/paths.go index 9d02a1a..4ea2d65 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/paths.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/paths.go @@ -141,7 +141,8 @@ func DetectProjectAndPath() (*Project, string, error) { if err != nil { return nil, "", err } else if path == "" { - return nil, "", errors.Errorf("no Pulumi project found in the current working directory") + return nil, "", errors.Errorf("no Pulumi project found in the current working directory. 
" + + "If you're using the `--stack` flag, make sure to pass the fully qualified name (org/project/stack)") } proj, err := LoadProject(path) diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go index 8a3abcb..c6a8e75 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,6 +38,7 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v3/go/common/util/archive" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/httputil" @@ -55,6 +56,67 @@ var ( enableLegacyPluginBehavior = os.Getenv("PULUMI_ENABLE_LEGACY_PLUGIN_SEARCH") != "" ) +// pluginDownloadURLOverrides is a variable instead of a constant so it can be set using the `-X` `ldflag` at build +// time, if necessary. When non-empty, it's parsed into `pluginDownloadURLOverridesParsed` in `init()`. The expected +// format is `regexp=URL`, and multiple pairs can be specified separated by commas, e.g. `regexp1=URL1,regexp2=URL2`. +// +// For example, when set to "^foo.*=https://foo&^bar.*=https://bar", plugin names that start with "foo" will use +// https://foo as the download URL and names that start with "bar" will use https://bar. +var pluginDownloadURLOverrides string + +// pluginDownloadURLOverridesParsed is the parsed array from `pluginDownloadURLOverrides`. 
+var pluginDownloadURLOverridesParsed pluginDownloadOverrideArray + +// pluginDownloadURLOverride represents a plugin download URL override, parsed from `pluginDownloadURLOverrides`. +type pluginDownloadURLOverride struct { + reg *regexp.Regexp // The regex used to match against the plugin's name. + url string // The URL to use for the matched plugin. +} + +// pluginDownloadOverrideArray represents an array of overrides. +type pluginDownloadOverrideArray []pluginDownloadURLOverride + +// get returns the URL and true if name matches an override's regular expression, +// otherwise an empty string and false. +func (overrides pluginDownloadOverrideArray) get(name string) (string, bool) { + for _, override := range overrides { + if override.reg.MatchString(name) { + return override.url, true + } + } + return "", false +} + +func init() { + var err error + if pluginDownloadURLOverridesParsed, err = parsePluginDownloadURLOverrides(pluginDownloadURLOverrides); err != nil { + panic(fmt.Errorf("error parsing `pluginDownloadURLOverrides`: %w", err)) + } +} + +// parsePluginDownloadURLOverrides parses an overrides string with the expected format `regexp1=URL1,regexp2=URL2`. +func parsePluginDownloadURLOverrides(overrides string) (pluginDownloadOverrideArray, error) { + var result pluginDownloadOverrideArray + if overrides == "" { + return result, nil + } + for _, pair := range strings.Split(overrides, ",") { + split := strings.Split(pair, "=") + if len(split) != 2 || split[0] == "" || split[1] == "" { + return nil, fmt.Errorf("expected format to be \"regexp1=URL1,regexp2=URL2\"; got %q", overrides) + } + reg, err := regexp.Compile(split[0]) + if err != nil { + return nil, err + } + result = append(result, pluginDownloadURLOverride{ + reg: reg, + url: split[1], + }) + } + return result, nil +} + // MissingError is returned by functions that attempt to load plugins if a plugin can't be located. 
type MissingError struct { // Info contains information about the plugin that was not found. @@ -83,15 +145,15 @@ func (err *MissingError) Error() string { // location, by default `~/.pulumi/plugins/--/`. A plugin may contain multiple files, // however the primary loadable executable must be named `pulumi--`. type PluginInfo struct { - Name string // the simple name of the plugin. - Path string // the path that a plugin was loaded from. - Kind PluginKind // the kind of the plugin (language, resource, etc). - Version *semver.Version // the plugin's semantic version, if present. - Size int64 // the size of the plugin, in bytes. - InstallTime time.Time // the time the plugin was installed. - LastUsedTime time.Time // the last time the plugin was used. - ServerURL string // an optional server to use when downloading this plugin. - PluginDir string // if set, will be used as the root plugin dir instead of ~/.pulumi/plugins. + Name string // the simple name of the plugin. + Path string // the path that a plugin was loaded from. + Kind PluginKind // the kind of the plugin (language, resource, etc). + Version *semver.Version // the plugin's semantic version, if present. + Size int64 // the size of the plugin, in bytes. + InstallTime time.Time // the time the plugin was installed. + LastUsedTime time.Time // the last time the plugin was used. + PluginDownloadURL string // an optional server to use when downloading this plugin. + PluginDir string // if set, will be used as the root plugin dir instead of ~/.pulumi/plugins. } // Dir gets the expected plugin directory for this plugin. 
@@ -207,13 +269,21 @@ func (info *PluginInfo) SetFileMetadata(path string) error { return nil } +func interpolateURL(serverURL string, version semver.Version, os, arch string) string { + replacer := strings.NewReplacer( + "${VERSION}", url.QueryEscape(version.String()), + "${OS}", url.QueryEscape(os), + "${ARCH}", url.QueryEscape(arch)) + return replacer.Replace(serverURL) +} + // Download fetches an io.ReadCloser for this plugin and also returns the size of the response (if known). func (info PluginInfo) Download() (io.ReadCloser, int64, error) { // Figure out the OS/ARCH pair for the download URL. - var os string + var opSy string switch runtime.GOOS { case "darwin", "linux", "windows": - os = runtime.GOOS + opSy = runtime.GOOS default: return nil, -1, errors.Errorf("unsupported plugin OS: %s", runtime.GOOS) } @@ -225,24 +295,79 @@ func (info PluginInfo) Download() (io.ReadCloser, int64, error) { return nil, -1, errors.Errorf("unsupported plugin architecture: %s", runtime.GOARCH) } - // If the plugin has a server, associated with it, download from there. Otherwise use the "default" location, which - // is hosted by Pulumi. - serverURL := info.ServerURL - if serverURL == "" { - serverURL = "https://get.pulumi.com/releases/plugins" + // The plugin version is necessary for the endpoint. If it's not present, return an error. + if info.Version == nil { + return nil, -1, errors.Errorf("unknown version for plugin %s", info.Name) } + + if info.PluginDownloadURL != "" { + return getPluginResponse( + buildUserSpecifiedPluginURL(info.PluginDownloadURL, info.Kind, info.Name, info.Version, opSy, arch)) + } + + // If the plugin name matches an override, download the plugin from the override URL. 
+ if url, ok := pluginDownloadURLOverridesParsed.get(info.Name); ok { + return getPluginResponse(buildUserSpecifiedPluginURL(url, info.Kind, info.Name, info.Version, opSy, arch)) + } + + if _, ok := os.LookupEnv("PULUMI_EXPERIMENTAL"); ok { + pluginURL := buildGitHubReleasesPluginURL(info.Kind, info.Name, info.Version, opSy, arch) + + resp, length, err := getPluginResponse(pluginURL) + if err == nil { + return resp, length, nil + } + + // we threw an error talking to GitHub so lets fallback to get.pulumi.com for the provider + logging.V(1).Infof("cannot find plugin on github.com/pulumi/pulumi-%s/releases", info.Name) + } + + return getPluginResponse(buildPulumiHostedPluginURL(info.Kind, info.Name, info.Version, opSy, arch)) +} + +func buildGitHubReleasesPluginURL(kind PluginKind, name string, version *semver.Version, opSy, arch string) string { + logging.V(1).Infof("%s downloading from github.com/pulumi/pulumi-%s/releases", name, name) + + return fmt.Sprintf("https://github.com/pulumi/pulumi-%s/releases/download/v%s/%s", + name, version.String(), url.QueryEscape(fmt.Sprintf("pulumi-%s-%s-v%s-%s-%s.tar.gz", + kind, name, version.String(), opSy, arch))) +} + +func buildPulumiHostedPluginURL(kind PluginKind, name string, version *semver.Version, opSy, arch string) string { + serverURL := "https://get.pulumi.com/releases/plugins" + + logging.V(1).Infof("%s downloading from %s", name, serverURL) + + serverURL = interpolateURL(serverURL, *version, opSy, arch) serverURL = strings.TrimSuffix(serverURL, "/") - logging.V(1).Infof("%s downloading from %s", info.Name, serverURL) + logging.V(1).Infof("%s downloading from %s", name, serverURL) + endpoint := fmt.Sprintf("%s/%s", + serverURL, + url.QueryEscape(fmt.Sprintf("pulumi-%s-%s-v%s-%s-%s.tar.gz", kind, name, version.String(), opSy, arch))) + + return endpoint +} + +func buildUserSpecifiedPluginURL(serverURL string, kind PluginKind, name string, version *semver.Version, + opSy, arch string) string { + logging.V(1).Infof("%s 
downloading from %s", name, serverURL) + + serverURL = interpolateURL(serverURL, *version, opSy, arch) + serverURL = strings.TrimSuffix(serverURL, "/") - // URL escape the path value to ensure we have the correct path for S3/CloudFront. + logging.V(1).Infof("%s downloading from %s", name, serverURL) endpoint := fmt.Sprintf("%s/%s", serverURL, - url.QueryEscape(fmt.Sprintf("pulumi-%s-%s-v%s-%s-%s.tar.gz", info.Kind, info.Name, info.Version, os, arch))) + url.QueryEscape(fmt.Sprintf("pulumi-%s-%s-v%s-%s-%s.tar.gz", kind, name, version.String(), opSy, arch))) - logging.V(9).Infof("full plugin download url: %s", endpoint) + return endpoint +} + +func getPluginResponse(pluginEndpoint string) (io.ReadCloser, int64, error) { + logging.V(9).Infof("full plugin download url: %s", pluginEndpoint) - req, err := http.NewRequest("GET", endpoint, nil) + req, err := http.NewRequest("GET", pluginEndpoint, nil) if err != nil { return nil, -1, err } @@ -260,7 +385,7 @@ func (info PluginInfo) Download() (io.ReadCloser, int64, error) { logging.V(9).Infof("plugin install response headers: %v", resp.Header) if resp.StatusCode < 200 || resp.StatusCode > 299 { - return nil, -1, errors.Errorf("%d HTTP error fetching plugin from %s", resp.StatusCode, endpoint) + return nil, -1, errors.Errorf("%d HTTP error fetching plugin from %s", resp.StatusCode, pluginEndpoint) } return resp.Body, resp.ContentLength, nil @@ -298,7 +423,7 @@ func (info PluginInfo) installLock() (unlock func(), err error) { // If a failure occurs during installation, the `.partial` file will remain, indicating the plugin wasn't fully // installed. The next time the plugin is installed, the old installation directory will be removed and replaced with // a fresh install. -func (info PluginInfo) Install(tgz io.ReadCloser) error { +func (info PluginInfo) Install(tgz io.ReadCloser, reinstall bool) error { defer contract.IgnoreClose(tgz) // Fetch the directory into which we will expand this tarball. 
@@ -332,16 +457,19 @@ func (info PluginInfo) Install(tgz io.ReadCloser) error { if finalDirStatErr == nil { _, partialFileStatErr := os.Stat(partialFilePath) if partialFileStatErr != nil { - if os.IsNotExist(partialFileStatErr) { - // finalDir exists and there's no partial file, so the plugin is already installed. + if !os.IsNotExist(partialFileStatErr) { + return partialFileStatErr + } + if !reinstall { + // finalDir exists, there's no partial file, and we're not reinstalling, so the plugin is already + // installed. return nil } - return partialFileStatErr } - // The partial file exists, meaning a previous attempt at installing the plugin failed. - // Delete finalDir so we can try installing again. There's no need to delete the partial - // file since we'd just be recreating it again below anyway. + // Either the partial file exists--meaning a previous attempt at installing the plugin failed--or we're + // deliberately reinstalling the plugin. Delete finalDir so we can try installing again. There's no need to + // delete the partial file since we'd just be recreating it again below anyway. if err := os.RemoveAll(finalDir); err != nil { return err } @@ -606,7 +734,8 @@ func GetPluginPath(kind PluginKind, name string, version *semver.Version) (strin // If we have a version of the plugin on its $PATH, use it, unless we have opted out of this behavior explicitly. // This supports development scenarios. 
- if _, isFound := os.LookupEnv("PULUMI_IGNORE_AMBIENT_PLUGINS"); !isFound { + optOut, isFound := os.LookupEnv("PULUMI_IGNORE_AMBIENT_PLUGINS") + if !(isFound && cmdutil.IsTruthy(optOut)) || kind == LanguagePlugin { filename = (&PluginInfo{Kind: kind, Name: name, Version: version}).FilePrefix() if path, err := exec.LookPath(filename); err == nil { logging.V(6).Infof("GetPluginPath(%s, %s, %v): found on $PATH %s", kind, name, version, path) diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go index c78b466..19e86ce 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go @@ -58,6 +58,11 @@ type ProjectBackend struct { URL string `json:"url,omitempty" yaml:"url,omitempty"` } +type ProjectOptions struct { + // Refresh is the ability to always run a refresh as part of a pulumi update / preview / destroy + Refresh string `json:"refresh,omitempty" yaml:"refresh,omitempty"` +} + // Project is a Pulumi project manifest. // // We explicitly add yaml tags (instead of using the default behavior from https://github.com/ghodss/yaml which works @@ -90,6 +95,9 @@ type Project struct { // Backend is an optional backend configuration Backend *ProjectBackend `json:"backend,omitempty" yaml:"backend,omitempty"` + + // Options is an optional set of project options + Options *ProjectOptions `json:"options,omitempty" yaml:"options,omitempty"` } func (proj *Project) Validate() error { @@ -305,7 +313,6 @@ func save(path string, value interface{}, mkDirAll bool) error { } } - // Changing the permissions on these file is ~ a breaking change, so disable golint. 
//nolint: gosec return ioutil.WriteFile(path, b, 0644) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go index d035d4f..6d7a1ee 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go @@ -287,7 +287,7 @@ func retrieveURLTemplates(rawurl string, offline bool, templateKind TemplateKind var fullPath string if fullPath, err = RetrieveGitFolder(rawurl, temp); err != nil { - return TemplateRepository{}, err + return TemplateRepository{}, fmt.Errorf("Failed to retrieve git folder: %w", err) } return TemplateRepository{ @@ -372,16 +372,35 @@ func RetrieveGitFolder(rawurl string, path string) (string, error) { ref, commit, subDirectory, err := gitutil.GetGitReferenceNameOrHashAndSubDirectory(url, urlPath) if err != nil { - return "", err + return "", fmt.Errorf("failed to get git ref: %w", err) } - if ref != "" { - if cloneErr := gitutil.GitCloneOrPull(url, ref, path, true /*shallow*/); cloneErr != nil { - return "", cloneErr + + // Different reference attempts to cycle through + // We default to master then main in that order. We need to order them to avoid breaking + // already existing processes for repos that already have a master and main branch. + refAttempts := []plumbing.ReferenceName{plumbing.Master, plumbing.NewBranchReferenceName("main")} + + if ref != plumbing.HEAD { + // If we have a non-default reference, we just use it + refAttempts = []plumbing.ReferenceName{ref} } + + var cloneErr error + for _, ref := range refAttempts { + // Attempt the clone. 
If it succeeds, break + cloneErr := gitutil.GitCloneOrPull(url, ref, path, true /*shallow*/) + if cloneErr == nil { + break + } + } + if cloneErr != nil { + return "", fmt.Errorf("failed to clone ref '%s': %w", refAttempts[len(refAttempts)-1], cloneErr) + } + } else { if cloneErr := gitutil.GitCloneAndCheckoutCommit(url, commit, path); cloneErr != nil { - return "", cloneErr + return "", fmt.Errorf("failed to clone and checkout %s(%s): %w", url, commit, cloneErr) } } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/workspace.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/workspace.go index 6df8244..0aaeedd 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/workspace.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/workspace.go @@ -1,4 +1,4 @@ -// Copyright 2016-2018, Pulumi Corporation. +// Copyright 2016-2021, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" @@ -105,7 +106,7 @@ func NewFrom(dir string) (W, error) { err = w.readSettings() if err != nil { - return nil, err + return nil, fmt.Errorf("unable to read workspace settings: %w", err) } upsertIntoCache(dir, w) @@ -139,8 +140,30 @@ func (pw *projectWorkspace) Save() error { if err != nil { return err } + return atomicWriteFile(settingsFile, b) +} + +// atomicWriteFile provides a rename based atomic write through a temporary file. 
+func atomicWriteFile(path string, b []byte) error { + tmp, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path)) + if err != nil { + return errors.Wrapf(err, "failed to create temporary file %s", path) + } + defer func() { contract.Ignore(os.Remove(tmp.Name())) }() - return ioutil.WriteFile(settingsFile, b, 0600) + if err = tmp.Chmod(0600); err != nil { + return errors.Wrap(err, "failed to set temporary file permission") + } + if _, err = tmp.Write(b); err != nil { + return errors.Wrap(err, "failed to write to temporary file") + } + if err = tmp.Sync(); err != nil { + return err + } + if err = tmp.Close(); err != nil { + return err + } + return os.Rename(tmp.Name(), path) } func (pw *projectWorkspace) readSettings() error { @@ -159,7 +182,7 @@ func (pw *projectWorkspace) readSettings() error { err = json.Unmarshal(b, &settings) if err != nil { - return err + return errors.Wrapf(err, "could not parse file %s", settingsPath) } pw.settings = &settings diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go index 738dc10..d673482 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go @@ -60,7 +60,7 @@ func (x PropertyDiff_Kind) String() string { } func (PropertyDiff_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{11, 0} + return fileDescriptor_c6a9f3c02af3d1c8, []int{13, 0} } type DiffResponse_DiffChanges int32 @@ -88,7 +88,7 @@ func (x DiffResponse_DiffChanges) String() string { } func (DiffResponse_DiffChanges) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{12, 0} + return fileDescriptor_c6a9f3c02af3d1c8, []int{14, 0} } type GetSchemaRequest struct { @@ -236,6 +236,7 @@ type ConfigureResponse struct { AcceptSecrets bool `protobuf:"varint,1,opt,name=acceptSecrets,proto3" json:"acceptSecrets,omitempty"` 
SupportsPreview bool `protobuf:"varint,2,opt,name=supportsPreview,proto3" json:"supportsPreview,omitempty"` AcceptResources bool `protobuf:"varint,3,opt,name=acceptResources,proto3" json:"acceptResources,omitempty"` + AcceptOutputs bool `protobuf:"varint,4,opt,name=acceptOutputs,proto3" json:"acceptOutputs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -287,6 +288,13 @@ func (m *ConfigureResponse) GetAcceptResources() bool { return false } +func (m *ConfigureResponse) GetAcceptOutputs() bool { + if m != nil { + return m.AcceptOutputs + } + return false +} + // ConfigureErrorMissingKeys is sent as a Detail on an error returned from `ResourceProvider.Configure`. type ConfigureErrorMissingKeys struct { MissingKeys []*ConfigureErrorMissingKeys_MissingKey `protobuf:"bytes,1,rep,name=missingKeys,proto3" json:"missingKeys,omitempty"` @@ -380,6 +388,7 @@ type InvokeRequest struct { Provider string `protobuf:"bytes,3,opt,name=provider,proto3" json:"provider,omitempty"` Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` AcceptResources bool `protobuf:"varint,5,opt,name=acceptResources,proto3" json:"acceptResources,omitempty"` + PluginDownloadURL string `protobuf:"bytes,6,opt,name=pluginDownloadURL,proto3" json:"pluginDownloadURL,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -445,6 +454,13 @@ func (m *InvokeRequest) GetAcceptResources() bool { return false } +func (m *InvokeRequest) GetPluginDownloadURL() string { + if m != nil { + return m.PluginDownloadURL + } + return "" +} + type InvokeResponse struct { Return *_struct.Struct `protobuf:"bytes,1,opt,name=return,proto3" json:"return,omitempty"` Failures []*CheckFailure `protobuf:"bytes,2,rep,name=failures,proto3" json:"failures,omitempty"` @@ -492,10 +508,281 @@ func (m *InvokeResponse) GetFailures() []*CheckFailure { return nil } +type 
CallRequest struct { + Tok string `protobuf:"bytes,1,opt,name=tok,proto3" json:"tok,omitempty"` + Args *_struct.Struct `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` + ArgDependencies map[string]*CallRequest_ArgumentDependencies `protobuf:"bytes,3,rep,name=argDependencies,proto3" json:"argDependencies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Provider string `protobuf:"bytes,4,opt,name=provider,proto3" json:"provider,omitempty"` + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + PluginDownloadURL string `protobuf:"bytes,13,opt,name=pluginDownloadURL,proto3" json:"pluginDownloadURL,omitempty"` + Project string `protobuf:"bytes,6,opt,name=project,proto3" json:"project,omitempty"` + Stack string `protobuf:"bytes,7,opt,name=stack,proto3" json:"stack,omitempty"` + Config map[string]string `protobuf:"bytes,8,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ConfigSecretKeys []string `protobuf:"bytes,9,rep,name=configSecretKeys,proto3" json:"configSecretKeys,omitempty"` + DryRun bool `protobuf:"varint,10,opt,name=dryRun,proto3" json:"dryRun,omitempty"` + Parallel int32 `protobuf:"varint,11,opt,name=parallel,proto3" json:"parallel,omitempty"` + MonitorEndpoint string `protobuf:"bytes,12,opt,name=monitorEndpoint,proto3" json:"monitorEndpoint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CallRequest) Reset() { *m = CallRequest{} } +func (m *CallRequest) String() string { return proto.CompactTextString(m) } +func (*CallRequest) ProtoMessage() {} +func (*CallRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c6a9f3c02af3d1c8, []int{7} +} + +func (m *CallRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallRequest.Unmarshal(m, b) +} +func (m *CallRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallRequest.Marshal(b, m, deterministic) +} +func (m *CallRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallRequest.Merge(m, src) +} +func (m *CallRequest) XXX_Size() int { + return xxx_messageInfo_CallRequest.Size(m) +} +func (m *CallRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CallRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CallRequest proto.InternalMessageInfo + +func (m *CallRequest) GetTok() string { + if m != nil { + return m.Tok + } + return "" +} + +func (m *CallRequest) GetArgs() *_struct.Struct { + if m != nil { + return m.Args + } + return nil +} + +func (m *CallRequest) GetArgDependencies() map[string]*CallRequest_ArgumentDependencies { + if m != nil { + return m.ArgDependencies + } + return nil +} + +func (m *CallRequest) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *CallRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *CallRequest) GetPluginDownloadURL() string { + if m != nil { + return m.PluginDownloadURL + } + return "" +} + +func (m *CallRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *CallRequest) GetStack() string { + if m != nil { + return m.Stack + } + return "" +} + +func (m *CallRequest) GetConfig() map[string]string { + if m != nil { + return m.Config + } + return nil +} + +func (m *CallRequest) GetConfigSecretKeys() []string { + if m != nil { + return m.ConfigSecretKeys + } + return nil +} + +func (m *CallRequest) GetDryRun() bool { + if m != nil { + return m.DryRun + } + return false +} + +func (m *CallRequest) GetParallel() int32 { + if m != nil { + return m.Parallel + } + return 0 +} + +func (m *CallRequest) GetMonitorEndpoint() string { + if m != nil { + return m.MonitorEndpoint + } + return "" +} + +// ArgumentDependencies describes the resources that a particular argument depends on. 
+type CallRequest_ArgumentDependencies struct { + Urns []string `protobuf:"bytes,1,rep,name=urns,proto3" json:"urns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CallRequest_ArgumentDependencies) Reset() { *m = CallRequest_ArgumentDependencies{} } +func (m *CallRequest_ArgumentDependencies) String() string { return proto.CompactTextString(m) } +func (*CallRequest_ArgumentDependencies) ProtoMessage() {} +func (*CallRequest_ArgumentDependencies) Descriptor() ([]byte, []int) { + return fileDescriptor_c6a9f3c02af3d1c8, []int{7, 0} +} + +func (m *CallRequest_ArgumentDependencies) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallRequest_ArgumentDependencies.Unmarshal(m, b) +} +func (m *CallRequest_ArgumentDependencies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallRequest_ArgumentDependencies.Marshal(b, m, deterministic) +} +func (m *CallRequest_ArgumentDependencies) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallRequest_ArgumentDependencies.Merge(m, src) +} +func (m *CallRequest_ArgumentDependencies) XXX_Size() int { + return xxx_messageInfo_CallRequest_ArgumentDependencies.Size(m) +} +func (m *CallRequest_ArgumentDependencies) XXX_DiscardUnknown() { + xxx_messageInfo_CallRequest_ArgumentDependencies.DiscardUnknown(m) +} + +var xxx_messageInfo_CallRequest_ArgumentDependencies proto.InternalMessageInfo + +func (m *CallRequest_ArgumentDependencies) GetUrns() []string { + if m != nil { + return m.Urns + } + return nil +} + +type CallResponse struct { + Return *_struct.Struct `protobuf:"bytes,1,opt,name=return,proto3" json:"return,omitempty"` + ReturnDependencies map[string]*CallResponse_ReturnDependencies `protobuf:"bytes,2,rep,name=returnDependencies,proto3" json:"returnDependencies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Failures []*CheckFailure 
`protobuf:"bytes,3,rep,name=failures,proto3" json:"failures,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CallResponse) Reset() { *m = CallResponse{} } +func (m *CallResponse) String() string { return proto.CompactTextString(m) } +func (*CallResponse) ProtoMessage() {} +func (*CallResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c6a9f3c02af3d1c8, []int{8} +} + +func (m *CallResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallResponse.Unmarshal(m, b) +} +func (m *CallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallResponse.Marshal(b, m, deterministic) +} +func (m *CallResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallResponse.Merge(m, src) +} +func (m *CallResponse) XXX_Size() int { + return xxx_messageInfo_CallResponse.Size(m) +} +func (m *CallResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CallResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CallResponse proto.InternalMessageInfo + +func (m *CallResponse) GetReturn() *_struct.Struct { + if m != nil { + return m.Return + } + return nil +} + +func (m *CallResponse) GetReturnDependencies() map[string]*CallResponse_ReturnDependencies { + if m != nil { + return m.ReturnDependencies + } + return nil +} + +func (m *CallResponse) GetFailures() []*CheckFailure { + if m != nil { + return m.Failures + } + return nil +} + +// ReturnDependencies describes the resources that a particular return value depends on. 
+type CallResponse_ReturnDependencies struct { + Urns []string `protobuf:"bytes,1,rep,name=urns,proto3" json:"urns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CallResponse_ReturnDependencies) Reset() { *m = CallResponse_ReturnDependencies{} } +func (m *CallResponse_ReturnDependencies) String() string { return proto.CompactTextString(m) } +func (*CallResponse_ReturnDependencies) ProtoMessage() {} +func (*CallResponse_ReturnDependencies) Descriptor() ([]byte, []int) { + return fileDescriptor_c6a9f3c02af3d1c8, []int{8, 0} +} + +func (m *CallResponse_ReturnDependencies) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallResponse_ReturnDependencies.Unmarshal(m, b) +} +func (m *CallResponse_ReturnDependencies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallResponse_ReturnDependencies.Marshal(b, m, deterministic) +} +func (m *CallResponse_ReturnDependencies) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallResponse_ReturnDependencies.Merge(m, src) +} +func (m *CallResponse_ReturnDependencies) XXX_Size() int { + return xxx_messageInfo_CallResponse_ReturnDependencies.Size(m) +} +func (m *CallResponse_ReturnDependencies) XXX_DiscardUnknown() { + xxx_messageInfo_CallResponse_ReturnDependencies.DiscardUnknown(m) +} + +var xxx_messageInfo_CallResponse_ReturnDependencies proto.InternalMessageInfo + +func (m *CallResponse_ReturnDependencies) GetUrns() []string { + if m != nil { + return m.Urns + } + return nil +} + type CheckRequest struct { Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` Olds *_struct.Struct `protobuf:"bytes,2,opt,name=olds,proto3" json:"olds,omitempty"` News *_struct.Struct `protobuf:"bytes,3,opt,name=news,proto3" json:"news,omitempty"` + SequenceNumber int32 `protobuf:"varint,4,opt,name=sequenceNumber,proto3" json:"sequenceNumber,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` 
XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -505,7 +792,7 @@ func (m *CheckRequest) Reset() { *m = CheckRequest{} } func (m *CheckRequest) String() string { return proto.CompactTextString(m) } func (*CheckRequest) ProtoMessage() {} func (*CheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{7} + return fileDescriptor_c6a9f3c02af3d1c8, []int{9} } func (m *CheckRequest) XXX_Unmarshal(b []byte) error { @@ -547,6 +834,13 @@ func (m *CheckRequest) GetNews() *_struct.Struct { return nil } +func (m *CheckRequest) GetSequenceNumber() int32 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + type CheckResponse struct { Inputs *_struct.Struct `protobuf:"bytes,1,opt,name=inputs,proto3" json:"inputs,omitempty"` Failures []*CheckFailure `protobuf:"bytes,2,rep,name=failures,proto3" json:"failures,omitempty"` @@ -559,7 +853,7 @@ func (m *CheckResponse) Reset() { *m = CheckResponse{} } func (m *CheckResponse) String() string { return proto.CompactTextString(m) } func (*CheckResponse) ProtoMessage() {} func (*CheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{8} + return fileDescriptor_c6a9f3c02af3d1c8, []int{10} } func (m *CheckResponse) XXX_Unmarshal(b []byte) error { @@ -606,7 +900,7 @@ func (m *CheckFailure) Reset() { *m = CheckFailure{} } func (m *CheckFailure) String() string { return proto.CompactTextString(m) } func (*CheckFailure) ProtoMessage() {} func (*CheckFailure) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{9} + return fileDescriptor_c6a9f3c02af3d1c8, []int{11} } func (m *CheckFailure) XXX_Unmarshal(b []byte) error { @@ -656,7 +950,7 @@ func (m *DiffRequest) Reset() { *m = DiffRequest{} } func (m *DiffRequest) String() string { return proto.CompactTextString(m) } func (*DiffRequest) ProtoMessage() {} func (*DiffRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{10} + return 
fileDescriptor_c6a9f3c02af3d1c8, []int{12} } func (m *DiffRequest) XXX_Unmarshal(b []byte) error { @@ -724,7 +1018,7 @@ func (m *PropertyDiff) Reset() { *m = PropertyDiff{} } func (m *PropertyDiff) String() string { return proto.CompactTextString(m) } func (*PropertyDiff) ProtoMessage() {} func (*PropertyDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{11} + return fileDescriptor_c6a9f3c02af3d1c8, []int{13} } func (m *PropertyDiff) XXX_Unmarshal(b []byte) error { @@ -806,7 +1100,7 @@ func (m *DiffResponse) Reset() { *m = DiffResponse{} } func (m *DiffResponse) String() string { return proto.CompactTextString(m) } func (*DiffResponse) ProtoMessage() {} func (*DiffResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{12} + return fileDescriptor_c6a9f3c02af3d1c8, []int{14} } func (m *DiffResponse) XXX_Unmarshal(b []byte) error { @@ -890,7 +1184,7 @@ func (m *CreateRequest) Reset() { *m = CreateRequest{} } func (m *CreateRequest) String() string { return proto.CompactTextString(m) } func (*CreateRequest) ProtoMessage() {} func (*CreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{13} + return fileDescriptor_c6a9f3c02af3d1c8, []int{15} } func (m *CreateRequest) XXX_Unmarshal(b []byte) error { @@ -951,7 +1245,7 @@ func (m *CreateResponse) Reset() { *m = CreateResponse{} } func (m *CreateResponse) String() string { return proto.CompactTextString(m) } func (*CreateResponse) ProtoMessage() {} func (*CreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{14} + return fileDescriptor_c6a9f3c02af3d1c8, []int{16} } func (m *CreateResponse) XXX_Unmarshal(b []byte) error { @@ -1000,7 +1294,7 @@ func (m *ReadRequest) Reset() { *m = ReadRequest{} } func (m *ReadRequest) String() string { return proto.CompactTextString(m) } func (*ReadRequest) ProtoMessage() {} func (*ReadRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_c6a9f3c02af3d1c8, []int{15} + return fileDescriptor_c6a9f3c02af3d1c8, []int{17} } func (m *ReadRequest) XXX_Unmarshal(b []byte) error { @@ -1062,7 +1356,7 @@ func (m *ReadResponse) Reset() { *m = ReadResponse{} } func (m *ReadResponse) String() string { return proto.CompactTextString(m) } func (*ReadResponse) ProtoMessage() {} func (*ReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{16} + return fileDescriptor_c6a9f3c02af3d1c8, []int{18} } func (m *ReadResponse) XXX_Unmarshal(b []byte) error { @@ -1121,7 +1415,7 @@ func (m *UpdateRequest) Reset() { *m = UpdateRequest{} } func (m *UpdateRequest) String() string { return proto.CompactTextString(m) } func (*UpdateRequest) ProtoMessage() {} func (*UpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{17} + return fileDescriptor_c6a9f3c02af3d1c8, []int{19} } func (m *UpdateRequest) XXX_Unmarshal(b []byte) error { @@ -1202,7 +1496,7 @@ func (m *UpdateResponse) Reset() { *m = UpdateResponse{} } func (m *UpdateResponse) String() string { return proto.CompactTextString(m) } func (*UpdateResponse) ProtoMessage() {} func (*UpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{18} + return fileDescriptor_c6a9f3c02af3d1c8, []int{20} } func (m *UpdateResponse) XXX_Unmarshal(b []byte) error { @@ -1244,7 +1538,7 @@ func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRequest) ProtoMessage() {} func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{19} + return fileDescriptor_c6a9f3c02af3d1c8, []int{21} } func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { @@ -1309,6 +1603,7 @@ type ConstructRequest struct { Providers map[string]string `protobuf:"bytes,13,rep,name=providers,proto3" json:"providers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` Aliases []string `protobuf:"bytes,14,rep,name=aliases,proto3" json:"aliases,omitempty"` Dependencies []string `protobuf:"bytes,15,rep,name=dependencies,proto3" json:"dependencies,omitempty"` + ConfigSecretKeys []string `protobuf:"bytes,16,rep,name=configSecretKeys,proto3" json:"configSecretKeys,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1318,7 +1613,7 @@ func (m *ConstructRequest) Reset() { *m = ConstructRequest{} } func (m *ConstructRequest) String() string { return proto.CompactTextString(m) } func (*ConstructRequest) ProtoMessage() {} func (*ConstructRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{20} + return fileDescriptor_c6a9f3c02af3d1c8, []int{22} } func (m *ConstructRequest) XXX_Unmarshal(b []byte) error { @@ -1444,6 +1739,13 @@ func (m *ConstructRequest) GetDependencies() []string { return nil } +func (m *ConstructRequest) GetConfigSecretKeys() []string { + if m != nil { + return m.ConfigSecretKeys + } + return nil +} + // PropertyDependencies describes the resources that a particular property depends on. 
type ConstructRequest_PropertyDependencies struct { Urns []string `protobuf:"bytes,1,rep,name=urns,proto3" json:"urns,omitempty"` @@ -1456,7 +1758,7 @@ func (m *ConstructRequest_PropertyDependencies) Reset() { *m = Construct func (m *ConstructRequest_PropertyDependencies) String() string { return proto.CompactTextString(m) } func (*ConstructRequest_PropertyDependencies) ProtoMessage() {} func (*ConstructRequest_PropertyDependencies) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{20, 0} + return fileDescriptor_c6a9f3c02af3d1c8, []int{22, 0} } func (m *ConstructRequest_PropertyDependencies) XXX_Unmarshal(b []byte) error { @@ -1497,7 +1799,7 @@ func (m *ConstructResponse) Reset() { *m = ConstructResponse{} } func (m *ConstructResponse) String() string { return proto.CompactTextString(m) } func (*ConstructResponse) ProtoMessage() {} func (*ConstructResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{21} + return fileDescriptor_c6a9f3c02af3d1c8, []int{23} } func (m *ConstructResponse) XXX_Unmarshal(b []byte) error { @@ -1553,7 +1855,7 @@ func (m *ConstructResponse_PropertyDependencies) Reset() { func (m *ConstructResponse_PropertyDependencies) String() string { return proto.CompactTextString(m) } func (*ConstructResponse_PropertyDependencies) ProtoMessage() {} func (*ConstructResponse_PropertyDependencies) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{21, 0} + return fileDescriptor_c6a9f3c02af3d1c8, []int{23, 0} } func (m *ConstructResponse_PropertyDependencies) XXX_Unmarshal(b []byte) error { @@ -1597,7 +1899,7 @@ func (m *ErrorResourceInitFailed) Reset() { *m = ErrorResourceInitFailed func (m *ErrorResourceInitFailed) String() string { return proto.CompactTextString(m) } func (*ErrorResourceInitFailed) ProtoMessage() {} func (*ErrorResourceInitFailed) Descriptor() ([]byte, []int) { - return fileDescriptor_c6a9f3c02af3d1c8, []int{22} + return 
fileDescriptor_c6a9f3c02af3d1c8, []int{24} } func (m *ErrorResourceInitFailed) XXX_Unmarshal(b []byte) error { @@ -1658,6 +1960,13 @@ func init() { proto.RegisterType((*ConfigureErrorMissingKeys_MissingKey)(nil), "pulumirpc.ConfigureErrorMissingKeys.MissingKey") proto.RegisterType((*InvokeRequest)(nil), "pulumirpc.InvokeRequest") proto.RegisterType((*InvokeResponse)(nil), "pulumirpc.InvokeResponse") + proto.RegisterType((*CallRequest)(nil), "pulumirpc.CallRequest") + proto.RegisterMapType((map[string]*CallRequest_ArgumentDependencies)(nil), "pulumirpc.CallRequest.ArgDependenciesEntry") + proto.RegisterMapType((map[string]string)(nil), "pulumirpc.CallRequest.ConfigEntry") + proto.RegisterType((*CallRequest_ArgumentDependencies)(nil), "pulumirpc.CallRequest.ArgumentDependencies") + proto.RegisterType((*CallResponse)(nil), "pulumirpc.CallResponse") + proto.RegisterMapType((map[string]*CallResponse_ReturnDependencies)(nil), "pulumirpc.CallResponse.ReturnDependenciesEntry") + proto.RegisterType((*CallResponse_ReturnDependencies)(nil), "pulumirpc.CallResponse.ReturnDependencies") proto.RegisterType((*CheckRequest)(nil), "pulumirpc.CheckRequest") proto.RegisterType((*CheckResponse)(nil), "pulumirpc.CheckResponse") proto.RegisterType((*CheckFailure)(nil), "pulumirpc.CheckFailure") @@ -1686,112 +1995,129 @@ func init() { func init() { proto.RegisterFile("provider.proto", fileDescriptor_c6a9f3c02af3d1c8) } var fileDescriptor_c6a9f3c02af3d1c8 = []byte{ - // 1670 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0xcd, 0x72, 0x1b, 0x45, - 0x10, 0xf6, 0x4a, 0xb2, 0x6c, 0xb5, 0x7e, 0x22, 0x0f, 0xc1, 0x51, 0x14, 0x1f, 0x5c, 0x0b, 0x55, - 0x98, 0x84, 0xc8, 0xc6, 0x39, 0x40, 0x52, 0x4e, 0x05, 0xdb, 0x92, 0x8d, 0x2b, 0x89, 0x63, 0xd6, - 0x09, 0x3f, 0xa7, 0x64, 0xb3, 0x1a, 0xc9, 0x8b, 0xa5, 0xdd, 0x65, 0x76, 0x56, 0x29, 0x73, 0xe6, - 0xc0, 0x05, 0xae, 0x14, 0x0f, 0x01, 0x54, 0xf1, 0x04, 0x3c, 0x08, 0x1c, 0x79, 0x00, 0x8a, 
0x17, - 0xa0, 0xe6, 0x6f, 0x3d, 0x23, 0xad, 0x7f, 0x49, 0xc1, 0x6d, 0x7b, 0xba, 0xa7, 0xa7, 0xfb, 0x9b, - 0x9e, 0xfe, 0x59, 0xa8, 0x45, 0x24, 0x1c, 0xf9, 0x5d, 0x4c, 0x5a, 0x11, 0x09, 0x69, 0x88, 0x4a, - 0x51, 0x32, 0x48, 0x86, 0x3e, 0x89, 0xbc, 0x66, 0x25, 0x1a, 0x24, 0x7d, 0x3f, 0x10, 0x8c, 0xe6, - 0x8d, 0x7e, 0x18, 0xf6, 0x07, 0x78, 0x99, 0x53, 0x2f, 0x93, 0xde, 0x32, 0x1e, 0x46, 0xf4, 0x48, - 0x32, 0x17, 0xc6, 0x99, 0x31, 0x25, 0x89, 0x47, 0x05, 0xd7, 0x7e, 0x0f, 0xea, 0xdb, 0x98, 0xee, - 0x7b, 0x07, 0x78, 0xe8, 0x3a, 0xf8, 0xab, 0x04, 0xc7, 0x14, 0x35, 0x60, 0x66, 0x84, 0x49, 0xec, - 0x87, 0x41, 0xc3, 0x5a, 0xb4, 0x96, 0xa6, 0x1d, 0x45, 0xda, 0xb7, 0x60, 0x4e, 0x93, 0x8e, 0xa3, - 0x30, 0x88, 0x31, 0x9a, 0x87, 0x62, 0xcc, 0x57, 0xb8, 0x74, 0xc9, 0x91, 0x94, 0xfd, 0x43, 0x0e, - 0xea, 0x9b, 0x61, 0xd0, 0xf3, 0xfb, 0x09, 0xc1, 0x4a, 0xf7, 0xc7, 0x50, 0x1a, 0xb9, 0xc4, 0x77, - 0x5f, 0x0e, 0x70, 0xdc, 0xb0, 0x16, 0xf3, 0x4b, 0xe5, 0xd5, 0x9b, 0xad, 0xd4, 0xaf, 0xd6, 0xb8, - 0x7c, 0xeb, 0x53, 0x25, 0xdc, 0x09, 0x28, 0x39, 0x72, 0x8e, 0x37, 0xa3, 0x5b, 0x50, 0x70, 0x49, - 0x3f, 0x6e, 0xe4, 0x16, 0xad, 0xa5, 0xf2, 0xea, 0xb5, 0x96, 0x70, 0xb3, 0xa5, 0xdc, 0x6c, 0xed, - 0x73, 0x37, 0x1d, 0x2e, 0x84, 0xde, 0x86, 0xaa, 0xeb, 0x79, 0x38, 0xa2, 0xfb, 0xd8, 0x23, 0x98, - 0xc6, 0x8d, 0xfc, 0xa2, 0xb5, 0x34, 0xeb, 0x98, 0x8b, 0x68, 0x09, 0xae, 0x88, 0x05, 0x07, 0xc7, - 0x61, 0x42, 0x3c, 0x1c, 0x37, 0x0a, 0x5c, 0x6e, 0x7c, 0xb9, 0xb9, 0x06, 0x35, 0xd3, 0x32, 0x54, - 0x87, 0xfc, 0x21, 0x3e, 0x92, 0x10, 0xb0, 0x4f, 0x74, 0x15, 0xa6, 0x47, 0xee, 0x20, 0xc1, 0xdc, - 0xc2, 0x92, 0x23, 0x88, 0x7b, 0xb9, 0x0f, 0x2d, 0xfb, 0x3b, 0x0b, 0xe6, 0x34, 0x4f, 0x25, 0x8e, - 0x13, 0x36, 0x5a, 0x27, 0xd8, 0x18, 0x27, 0x51, 0x14, 0x12, 0x1a, 0xef, 0x11, 0x3c, 0xf2, 0xf1, - 0x2b, 0xae, 0x7f, 0xd6, 0x19, 0x5f, 0xce, 0xf2, 0x26, 0x9f, 0xe9, 0x8d, 0xfd, 0xab, 0x05, 0xd7, - 0x53, 0x7b, 0x3a, 0x84, 0x84, 0xe4, 0xb1, 0x1f, 0xc7, 0x7e, 0xd0, 0x7f, 0x88, 0x8f, 0x62, 0xf4, - 0x09, 0x94, 0x87, 0xc7, 0xa4, 
0xbc, 0xb4, 0xe5, 0xac, 0x4b, 0x1b, 0xdf, 0xda, 0x3a, 0xfe, 0x76, - 0x74, 0x1d, 0xcd, 0x0d, 0x80, 0x63, 0x16, 0x42, 0x50, 0x08, 0xdc, 0x21, 0x96, 0xd8, 0xf1, 0x6f, - 0xb4, 0x08, 0xe5, 0x2e, 0x8e, 0x3d, 0xe2, 0x47, 0x94, 0xc5, 0xa1, 0x80, 0x50, 0x5f, 0xb2, 0x7f, - 0xb6, 0xa0, 0xba, 0x13, 0x8c, 0xc2, 0xc3, 0x34, 0xb6, 0xea, 0x90, 0xa7, 0xe1, 0xa1, 0xba, 0x02, - 0x1a, 0x1e, 0x5e, 0x2c, 0x46, 0x9a, 0x30, 0xab, 0x1e, 0x1c, 0x07, 0xaa, 0xe4, 0xa4, 0xb4, 0xfe, - 0x24, 0x0a, 0x9c, 0xa5, 0xc8, 0x2c, 0x94, 0xa7, 0xb3, 0x51, 0x1e, 0x41, 0x4d, 0xd9, 0x2b, 0x6f, - 0x7c, 0x19, 0x8a, 0x04, 0xd3, 0x84, 0x88, 0x77, 0x76, 0x8a, 0x81, 0x52, 0x0c, 0xdd, 0x81, 0xd9, - 0x9e, 0xeb, 0x0f, 0x12, 0x82, 0x99, 0x4f, 0x79, 0xbe, 0x45, 0xbb, 0x87, 0x03, 0xec, 0x1d, 0x6e, - 0x09, 0xbe, 0x93, 0x0a, 0xda, 0x5f, 0x43, 0x85, 0x73, 0x34, 0x98, 0xd4, 0x91, 0x25, 0x87, 0x7d, - 0x32, 0x98, 0xc2, 0x41, 0xf7, 0x6c, 0x98, 0x98, 0x10, 0x13, 0x0e, 0xf0, 0x2b, 0x11, 0x4b, 0xa7, - 0x09, 0x33, 0x21, 0x3b, 0x81, 0xaa, 0x3c, 0xfb, 0xd8, 0x65, 0x3f, 0x88, 0x12, 0x19, 0xdd, 0xa7, - 0xb9, 0x2c, 0xc4, 0x2e, 0xe7, 0xf2, 0x86, 0x74, 0x59, 0x72, 0xe4, 0xd5, 0x46, 0x98, 0x50, 0xf5, - 0x42, 0x53, 0x9a, 0xa5, 0x2f, 0x82, 0xdd, 0x38, 0x0d, 0x32, 0x49, 0xd9, 0xbf, 0x58, 0x50, 0x6e, - 0xfb, 0xbd, 0x9e, 0x82, 0xad, 0x06, 0x39, 0xbf, 0x2b, 0x77, 0xe7, 0xfc, 0xae, 0x82, 0x31, 0x37, - 0x09, 0x63, 0xfe, 0x22, 0x30, 0x16, 0xce, 0x01, 0x23, 0x4b, 0x0d, 0x7e, 0x3f, 0x08, 0x09, 0xde, - 0x3c, 0x70, 0x83, 0x3e, 0x0f, 0xb1, 0xfc, 0x52, 0xc9, 0x31, 0x17, 0xed, 0xdf, 0x2c, 0xa8, 0xec, - 0x49, 0xb7, 0x98, 0xe5, 0x68, 0x05, 0x0a, 0x87, 0x7e, 0x20, 0x8c, 0xae, 0xad, 0x2e, 0x68, 0xb8, - 0xe9, 0x62, 0xad, 0x87, 0x7e, 0xd0, 0x75, 0xb8, 0x24, 0x5a, 0x80, 0x12, 0xc7, 0x9d, 0xad, 0xcb, - 0xbc, 0x72, 0xbc, 0x60, 0xbf, 0x80, 0x02, 0x93, 0x45, 0x33, 0x90, 0x5f, 0x6f, 0xb7, 0xeb, 0x53, - 0xe8, 0x0a, 0x94, 0xd7, 0xdb, 0xed, 0xe7, 0x4e, 0x67, 0xef, 0xd1, 0xfa, 0x66, 0xa7, 0x6e, 0x21, - 0x80, 0x62, 0xbb, 0xf3, 0xa8, 0xf3, 0xb4, 0x53, 0xcf, 0x21, 0x04, 
0x35, 0xf1, 0x9d, 0xf2, 0xf3, - 0x8c, 0xff, 0x6c, 0xaf, 0xbd, 0xfe, 0xb4, 0x53, 0x2f, 0x30, 0xbe, 0xf8, 0x4e, 0xf9, 0xd3, 0xf6, - 0x1f, 0x79, 0xa8, 0x08, 0xd0, 0x65, 0xbc, 0x34, 0x61, 0x96, 0xe0, 0x68, 0xe0, 0x7a, 0xb2, 0x5c, - 0x94, 0x9c, 0x94, 0x66, 0x8f, 0x32, 0xa6, 0xa2, 0x92, 0xe4, 0x38, 0x4b, 0x91, 0x68, 0x05, 0xde, - 0xe8, 0xe2, 0x01, 0xa6, 0x78, 0x03, 0xf7, 0x42, 0x96, 0x62, 0xf9, 0x0e, 0x99, 0xfe, 0xb2, 0x58, - 0xe8, 0x3e, 0xcc, 0x78, 0x12, 0xdb, 0x02, 0x47, 0xeb, 0x2d, 0x0d, 0x2d, 0xdd, 0x22, 0x4e, 0x48, - 0xc4, 0x1d, 0xb5, 0x87, 0xe5, 0xfa, 0xae, 0xdf, 0xeb, 0xa9, 0x8b, 0x11, 0x04, 0x7a, 0x0c, 0x95, - 0x2e, 0xa6, 0xae, 0x3f, 0xc0, 0x5d, 0x0e, 0x68, 0x91, 0xc7, 0xef, 0xbb, 0x27, 0x6a, 0xd6, 0x64, - 0x45, 0xb9, 0x33, 0xb6, 0xb3, 0x54, 0x73, 0xe0, 0xc6, 0xba, 0x54, 0x63, 0x46, 0xa4, 0x9a, 0xb1, - 0xe5, 0xe6, 0xe7, 0x30, 0x37, 0xa1, 0x2c, 0xa3, 0x42, 0xdd, 0xd6, 0x2b, 0x94, 0xf9, 0xb0, 0xf4, - 0x00, 0xd1, 0x4b, 0xd7, 0x7d, 0xf1, 0x28, 0x24, 0x00, 0xa8, 0x0e, 0x95, 0xf6, 0xce, 0xd6, 0xd6, - 0xf3, 0x67, 0xbb, 0x0f, 0x77, 0x9f, 0x7c, 0xb6, 0x5b, 0x9f, 0x42, 0x55, 0x28, 0xf1, 0x95, 0xdd, - 0x27, 0xbb, 0x2c, 0x20, 0x14, 0xb9, 0xff, 0xe4, 0x71, 0xa7, 0x9e, 0xb3, 0xbf, 0xb7, 0xa0, 0xba, - 0x49, 0xb0, 0x4b, 0xf1, 0xc9, 0xd9, 0xe8, 0x03, 0x00, 0xf9, 0x38, 0x7d, 0x7c, 0x66, 0x4e, 0xd2, - 0x44, 0x59, 0x3c, 0x50, 0x7f, 0x88, 0xc3, 0x84, 0xf2, 0x9b, 0xb6, 0x1c, 0x45, 0x32, 0x4e, 0x24, - 0x8b, 0xa5, 0x28, 0xe8, 0x8a, 0xb4, 0xbf, 0x80, 0x9a, 0xb2, 0x47, 0x46, 0xdc, 0xf8, 0x3b, 0xbf, - 0xac, 0x39, 0xf6, 0x8f, 0x16, 0x94, 0x1d, 0xec, 0x76, 0xcf, 0x9f, 0x40, 0xcc, 0xa3, 0xf2, 0xe7, - 0xf7, 0xfc, 0x38, 0xab, 0x16, 0xce, 0x95, 0x55, 0xed, 0x6f, 0x2d, 0xa8, 0x08, 0xdb, 0x5e, 0xb3, - 0xd7, 0x9a, 0x29, 0xf9, 0xf3, 0x99, 0xf2, 0xa7, 0x05, 0xd5, 0x67, 0x51, 0x57, 0x0b, 0x89, 0xff, - 0x33, 0xd3, 0x6a, 0x31, 0x34, 0x6d, 0xc6, 0xd0, 0x44, 0x0e, 0x2e, 0x66, 0xe4, 0x60, 0x3d, 0xd2, - 0x66, 0xcc, 0x48, 0xdb, 0x81, 0x9a, 0x72, 0x53, 0x62, 0x6e, 0x62, 0x6c, 0x9d, 0x3f, 0xb2, 0xbe, - 0xb1, 
0xa0, 0xda, 0xe6, 0x49, 0xec, 0x3f, 0x88, 0x2d, 0x0d, 0x91, 0x82, 0x81, 0x88, 0xfd, 0x77, - 0x91, 0x37, 0xf8, 0x62, 0x9e, 0xd0, 0x86, 0x87, 0x88, 0x84, 0x5f, 0x62, 0x8f, 0x4a, 0x73, 0x14, - 0xc9, 0x72, 0x64, 0x4c, 0x5d, 0xef, 0x50, 0xf5, 0xc3, 0x9c, 0x40, 0x0f, 0xa0, 0xe8, 0xf1, 0xfe, - 0xb1, 0x91, 0xe7, 0xd9, 0xf1, 0x1d, 0xb3, 0xb1, 0x34, 0x94, 0xcb, 0x4e, 0x53, 0xe4, 0x46, 0xb9, - 0x8d, 0xd5, 0xef, 0x2e, 0x39, 0x72, 0x92, 0x40, 0x3e, 0x6d, 0x49, 0xf1, 0x9a, 0xef, 0x12, 0x77, - 0x30, 0xc0, 0x03, 0x7e, 0x95, 0xd3, 0x4e, 0x4a, 0xb3, 0x4c, 0x3a, 0x0c, 0x03, 0x9f, 0x86, 0xa4, - 0x13, 0x74, 0xa3, 0xd0, 0x0f, 0x68, 0xa3, 0xc8, 0x8d, 0x1a, 0x5f, 0x66, 0xbd, 0x29, 0x3d, 0x8a, - 0x30, 0xbf, 0xcc, 0x92, 0xc3, 0xbf, 0xd3, 0x7e, 0x75, 0x56, 0xeb, 0x57, 0xe7, 0xa1, 0x18, 0xb9, - 0x04, 0x07, 0xb4, 0x51, 0x12, 0x5d, 0x84, 0xa0, 0xb4, 0xe7, 0x00, 0xe7, 0xeb, 0x77, 0x5e, 0xc0, - 0x9c, 0x28, 0xb8, 0x38, 0xc2, 0x41, 0x17, 0x07, 0x1e, 0xbb, 0xae, 0x32, 0x87, 0x66, 0xf5, 0x34, - 0x68, 0x76, 0xc6, 0x37, 0x09, 0x94, 0x26, 0x95, 0xc9, 0x1b, 0xa2, 0xec, 0x86, 0x2a, 0x2a, 0x44, - 0x39, 0xc9, 0x86, 0x33, 0xd5, 0xf1, 0xc6, 0x8d, 0x6a, 0xd6, 0x70, 0x66, 0x9e, 0xb9, 0xa7, 0x84, - 0xe5, 0x70, 0x96, 0x6e, 0x66, 0x67, 0xb8, 0x03, 0xdf, 0x8d, 0x71, 0xdc, 0xa8, 0x89, 0xd2, 0x2c, - 0x49, 0x64, 0xb3, 0x9a, 0xa8, 0xb9, 0x76, 0x85, 0xb3, 0x8d, 0xb5, 0xe6, 0x4d, 0xb8, 0x9a, 0xd6, - 0x1f, 0xdd, 0x72, 0x04, 0x85, 0x84, 0x04, 0xaa, 0x11, 0xe0, 0xdf, 0xcd, 0xbb, 0x50, 0xd6, 0xa2, - 0xe2, 0x22, 0x63, 0x58, 0x73, 0x04, 0xf3, 0xd9, 0xa8, 0x65, 0x68, 0xd9, 0x32, 0x4b, 0xe5, 0xca, - 0x19, 0xb0, 0x4c, 0xd8, 0xae, 0x9f, 0xbb, 0x06, 0x35, 0x13, 0xb9, 0x0b, 0x0d, 0x8f, 0xbf, 0xe7, - 0xf8, 0xf0, 0xa8, 0x8e, 0x94, 0xb9, 0x64, 0xb2, 0x8c, 0xde, 0xe6, 0xcf, 0x8d, 0xe2, 0xb3, 0x92, - 0xb7, 0x90, 0x42, 0x2e, 0xcc, 0xf1, 0x0f, 0x23, 0xee, 0xc4, 0x93, 0xbc, 0x93, 0xed, 0xac, 0xec, - 0x5a, 0xf6, 0xc7, 0x77, 0xc9, 0xc0, 0x9b, 0xd0, 0x76, 0xa1, 0x6b, 0x7d, 0x05, 0xf3, 0xd9, 0x8a, - 0x33, 0xb0, 0xda, 0x36, 0xef, 0xe6, 0xfd, 
0x53, 0xcd, 0x3d, 0xe3, 0x72, 0xec, 0x9f, 0x2c, 0xb8, - 0xc6, 0xe7, 0x58, 0x35, 0xb8, 0xed, 0x04, 0x3e, 0xdd, 0xe2, 0xad, 0xd4, 0xeb, 0x2b, 0x92, 0x0d, - 0x98, 0x11, 0x53, 0x86, 0x80, 0xb8, 0xe4, 0x28, 0xf2, 0xc2, 0x95, 0x7c, 0xf5, 0xaf, 0x19, 0xa8, - 0x2b, 0x53, 0x55, 0x54, 0xb1, 0x87, 0x9c, 0xfe, 0xa7, 0x41, 0x37, 0x34, 0x3c, 0xc6, 0xff, 0xf5, - 0x34, 0x17, 0xb2, 0x99, 0x02, 0x2c, 0x7b, 0x0a, 0x6d, 0x40, 0x99, 0x4f, 0x52, 0xe2, 0x8d, 0xa1, - 0x89, 0xd9, 0x4b, 0xe9, 0x69, 0x4c, 0x32, 0x52, 0x1d, 0x0f, 0x00, 0x78, 0xcf, 0x28, 0xf3, 0xf5, - 0x44, 0xfb, 0x2b, 0x34, 0x5c, 0x3b, 0xa1, 0x2d, 0xb6, 0xa7, 0x98, 0x3b, 0xe9, 0x3f, 0x06, 0xc3, - 0x9d, 0xf1, 0xdf, 0x45, 0x86, 0x3b, 0x13, 0x7f, 0x58, 0xb8, 0x29, 0x45, 0x31, 0x83, 0x23, 0xdd, - 0x60, 0xe3, 0x37, 0x42, 0xf3, 0x7a, 0x06, 0x27, 0x55, 0xb0, 0x0d, 0x95, 0x7d, 0x4a, 0xb0, 0x3b, - 0xfc, 0x57, 0x6a, 0x56, 0x2c, 0xb4, 0x06, 0xd3, 0x1c, 0xa7, 0xcb, 0x41, 0x7a, 0x17, 0x0a, 0x7c, - 0x24, 0xb8, 0x04, 0x98, 0x0f, 0xa0, 0x28, 0x3a, 0x5e, 0xc3, 0x76, 0xa3, 0x29, 0x37, 0x6c, 0x37, - 0xdb, 0x63, 0x71, 0x36, 0x6b, 0x1d, 0x8d, 0xb3, 0xb5, 0x3e, 0xd7, 0x38, 0x5b, 0xef, 0x31, 0xc5, - 0xd9, 0xa2, 0x07, 0x32, 0xce, 0x36, 0xba, 0x3f, 0xe3, 0x6c, 0xb3, 0x61, 0xb2, 0xa7, 0xd0, 0x1a, - 0x14, 0x45, 0xe3, 0x63, 0x28, 0x30, 0x7a, 0xa1, 0xe6, 0xfc, 0xc4, 0x93, 0xe9, 0x0c, 0x23, 0x7a, - 0x94, 0xc6, 0x91, 0x48, 0x08, 0xe3, 0x71, 0x64, 0xa4, 0xf0, 0xf1, 0x38, 0x32, 0x73, 0x88, 0x3d, - 0x85, 0xee, 0x41, 0x71, 0xd3, 0x0d, 0x3c, 0x3c, 0x40, 0x27, 0x9c, 0x76, 0x8a, 0x15, 0x1f, 0x41, - 0x75, 0x1b, 0xd3, 0x3d, 0xfe, 0x03, 0x77, 0x27, 0xe8, 0x85, 0x27, 0xaa, 0x78, 0x53, 0x9f, 0xc7, - 0x52, 0x71, 0x7b, 0xea, 0x65, 0x91, 0x0b, 0xde, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0xac, 0xa3, - 0xd1, 0x1f, 0x21, 0x16, 0x00, 0x00, + // 1947 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0xcd, 0x73, 0x1b, 0x49, + 0x15, 0xf7, 0xe8, 0xcb, 0xd2, 0xd3, 0x47, 0xe4, 0x66, 0xb1, 0xb5, 0xda, 0x1c, 0x5c, 0xc3, 
0x16, + 0x98, 0x64, 0x57, 0x09, 0xce, 0x01, 0x92, 0xca, 0x56, 0xd6, 0xb1, 0xe4, 0xac, 0x2b, 0x89, 0x63, + 0xc6, 0x6b, 0x3e, 0x4e, 0xd9, 0xc9, 0xa8, 0xa5, 0x0c, 0x1e, 0xcd, 0xcc, 0xf6, 0xf4, 0x38, 0xe5, + 0x3b, 0x07, 0x4e, 0x5c, 0x29, 0x4e, 0x54, 0x71, 0x86, 0xa2, 0x0a, 0xfe, 0x01, 0xfe, 0x09, 0x6e, + 0xe1, 0xc8, 0x3f, 0xc0, 0x5f, 0x40, 0xf5, 0xd7, 0xa8, 0x5b, 0x33, 0xb2, 0x65, 0x93, 0x82, 0x5b, + 0xbf, 0xee, 0xd7, 0xaf, 0xdf, 0xfb, 0xbd, 0x8f, 0x7e, 0xdd, 0xd0, 0x89, 0x49, 0x74, 0xee, 0x8f, + 0x31, 0x19, 0xc4, 0x24, 0xa2, 0x11, 0x6a, 0xc4, 0x69, 0x90, 0xce, 0x7c, 0x12, 0x7b, 0xfd, 0x56, + 0x1c, 0xa4, 0x53, 0x3f, 0x14, 0x0b, 0xfd, 0x4f, 0xa6, 0x51, 0x34, 0x0d, 0xf0, 0x3d, 0x4e, 0xbd, + 0x49, 0x27, 0xf7, 0xf0, 0x2c, 0xa6, 0x17, 0x72, 0xf1, 0xf6, 0xe2, 0x62, 0x42, 0x49, 0xea, 0x51, + 0xb1, 0x6a, 0x7f, 0x06, 0xdd, 0x67, 0x98, 0x9e, 0x78, 0x6f, 0xf1, 0xcc, 0x75, 0xf0, 0xb7, 0x29, + 0x4e, 0x28, 0xea, 0xc1, 0xfa, 0x39, 0x26, 0x89, 0x1f, 0x85, 0x3d, 0x6b, 0xdb, 0xda, 0xa9, 0x3a, + 0x8a, 0xb4, 0xef, 0xc2, 0x86, 0xc6, 0x9d, 0xc4, 0x51, 0x98, 0x60, 0xb4, 0x09, 0xb5, 0x84, 0xcf, + 0x70, 0xee, 0x86, 0x23, 0x29, 0xfb, 0x77, 0x25, 0xe8, 0xee, 0x47, 0xe1, 0xc4, 0x9f, 0xa6, 0x04, + 0x2b, 0xd9, 0x5f, 0x41, 0xe3, 0xdc, 0x25, 0xbe, 0xfb, 0x26, 0xc0, 0x49, 0xcf, 0xda, 0x2e, 0xef, + 0x34, 0x77, 0xef, 0x0c, 0x32, 0xbb, 0x06, 0x8b, 0xfc, 0x83, 0x9f, 0x29, 0xe6, 0x51, 0x48, 0xc9, + 0x85, 0x33, 0xdf, 0x8c, 0xee, 0x42, 0xc5, 0x25, 0xd3, 0xa4, 0x57, 0xda, 0xb6, 0x76, 0x9a, 0xbb, + 0x5b, 0x03, 0x61, 0xe6, 0x40, 0x99, 0x39, 0x38, 0xe1, 0x66, 0x3a, 0x9c, 0x09, 0x7d, 0x0a, 0x6d, + 0xd7, 0xf3, 0x70, 0x4c, 0x4f, 0xb0, 0x47, 0x30, 0x4d, 0x7a, 0xe5, 0x6d, 0x6b, 0xa7, 0xee, 0x98, + 0x93, 0x68, 0x07, 0x6e, 0x89, 0x09, 0x07, 0x27, 0x51, 0x4a, 0x3c, 0x9c, 0xf4, 0x2a, 0x9c, 0x6f, + 0x71, 0xba, 0xff, 0x18, 0x3a, 0xa6, 0x66, 0xa8, 0x0b, 0xe5, 0x33, 0x7c, 0x21, 0x21, 0x60, 0x43, + 0xf4, 0x11, 0x54, 0xcf, 0xdd, 0x20, 0xc5, 0x5c, 0xc3, 0x86, 0x23, 0x88, 0x47, 0xa5, 0x9f, 0x58, + 0xf6, 0xdf, 0x2c, 0xd8, 0xd0, 
0x2c, 0x95, 0x38, 0xe6, 0x74, 0xb4, 0x96, 0xe8, 0x98, 0xa4, 0x71, + 0x1c, 0x11, 0x9a, 0x1c, 0x13, 0x7c, 0xee, 0xe3, 0x77, 0x5c, 0x7e, 0xdd, 0x59, 0x9c, 0x2e, 0xb2, + 0xa6, 0x5c, 0x68, 0xcd, 0xfc, 0xe4, 0x57, 0x29, 0x8d, 0x53, 0xaa, 0xac, 0x36, 0x27, 0xed, 0xbf, + 0x5a, 0xf0, 0x71, 0xa6, 0xf5, 0x88, 0x90, 0x88, 0xbc, 0xf4, 0x93, 0xc4, 0x0f, 0xa7, 0xcf, 0xf1, + 0x45, 0x82, 0x7e, 0x0a, 0xcd, 0xd9, 0x9c, 0x94, 0xae, 0xbd, 0x57, 0xe4, 0xda, 0xc5, 0xad, 0x83, + 0xf9, 0xd8, 0xd1, 0x65, 0xf4, 0x9f, 0x02, 0xcc, 0x97, 0x10, 0x82, 0x4a, 0xe8, 0xce, 0xb0, 0x44, + 0x98, 0x8f, 0xd1, 0x36, 0x34, 0xc7, 0x38, 0xf1, 0x88, 0x1f, 0x53, 0x16, 0xad, 0x02, 0x68, 0x7d, + 0xca, 0x7e, 0x6f, 0x41, 0xfb, 0x30, 0x3c, 0x8f, 0xce, 0xb2, 0x08, 0xec, 0x42, 0x99, 0x46, 0x67, + 0xca, 0x51, 0x34, 0x3a, 0xbb, 0x5e, 0x24, 0xf5, 0xa1, 0xae, 0xd2, 0x92, 0xc3, 0xd9, 0x70, 0x32, + 0x5a, 0x4f, 0x9c, 0x0a, 0x5f, 0x52, 0x64, 0x91, 0x2f, 0xaa, 0xc5, 0xbe, 0xf8, 0x0c, 0x36, 0x44, + 0x6e, 0x0f, 0xa3, 0x77, 0x61, 0x10, 0xb9, 0xe3, 0x53, 0xe7, 0x45, 0xaf, 0xc6, 0xa5, 0xe5, 0x17, + 0xec, 0x73, 0xe8, 0x28, 0xeb, 0x64, 0x14, 0xdd, 0x83, 0x1a, 0xc1, 0x34, 0x25, 0x22, 0x77, 0x2f, + 0x31, 0x47, 0xb2, 0xa1, 0x07, 0x50, 0x9f, 0xb8, 0x7e, 0x90, 0x12, 0xcc, 0x10, 0x28, 0xf3, 0x2d, + 0x9a, 0xd7, 0xde, 0x62, 0xef, 0xec, 0x40, 0xac, 0x3b, 0x19, 0xa3, 0xfd, 0x8f, 0x2a, 0x34, 0xf7, + 0xdd, 0x20, 0xf8, 0x40, 0xa0, 0x9e, 0xc2, 0x2d, 0x97, 0x4c, 0x87, 0x38, 0xc6, 0xe1, 0x18, 0x87, + 0x9e, 0xcf, 0x43, 0x95, 0xa9, 0x72, 0x57, 0x57, 0x65, 0x7e, 0xde, 0x60, 0xcf, 0xe4, 0x16, 0xc5, + 0x61, 0x51, 0x86, 0xe1, 0xab, 0xca, 0x72, 0x5f, 0x55, 0x4d, 0x5f, 0x15, 0x7a, 0xa0, 0xbd, 0xc4, + 0x03, 0x4c, 0x4e, 0x4c, 0xa2, 0x5f, 0x61, 0x8f, 0x4a, 0x2f, 0x29, 0x92, 0xe5, 0x7f, 0x42, 0x5d, + 0xef, 0xac, 0xb7, 0x2e, 0xf2, 0x9f, 0x13, 0xe8, 0x11, 0xd4, 0x3c, 0x9e, 0x09, 0xbd, 0x3a, 0xb7, + 0xd0, 0x5e, 0x62, 0xa1, 0x48, 0x17, 0x61, 0x98, 0xdc, 0x81, 0xee, 0x40, 0x57, 0x8c, 0x44, 0x31, + 0xe0, 0x89, 0xd6, 0xd8, 0x2e, 0xef, 0x34, 0x9c, 0xdc, 0x3c, 0xab, 
0xca, 0x63, 0x72, 0xe1, 0xa4, + 0x61, 0x0f, 0x78, 0xa0, 0x49, 0x8a, 0x63, 0xe2, 0x12, 0x37, 0x08, 0x70, 0xd0, 0x6b, 0xf2, 0xea, + 0x9e, 0xd1, 0x2c, 0x4a, 0x67, 0x51, 0xe8, 0xd3, 0x88, 0x8c, 0xc2, 0x71, 0x1c, 0xf9, 0x21, 0xed, + 0xb5, 0xb8, 0xee, 0x8b, 0xd3, 0xfd, 0x3b, 0xf0, 0xd1, 0x1e, 0x99, 0xa6, 0x33, 0x1c, 0x52, 0x03, + 0x71, 0x04, 0x95, 0x94, 0x84, 0x22, 0xfd, 0x1b, 0x0e, 0x1f, 0xf7, 0x23, 0xce, 0x9b, 0x73, 0x57, + 0x41, 0xc5, 0xdc, 0xd3, 0x2b, 0xe6, 0xa5, 0xce, 0xcf, 0x9d, 0xac, 0x95, 0xd7, 0xfe, 0x43, 0x68, + 0x6a, 0xe8, 0x5d, 0xab, 0x32, 0xff, 0xbb, 0x04, 0x2d, 0x71, 0xd4, 0x4d, 0xd3, 0xe9, 0x35, 0x20, + 0x31, 0x32, 0xa2, 0xb9, 0x94, 0x2f, 0x87, 0xda, 0x29, 0x03, 0x27, 0xb7, 0x43, 0x38, 0xbe, 0x40, + 0x94, 0x91, 0xaf, 0xe5, 0x15, 0xf3, 0xb5, 0xbf, 0x03, 0x28, 0x7f, 0x46, 0xa1, 0xb7, 0xbe, 0x85, + 0xad, 0x25, 0xda, 0x14, 0x00, 0xf9, 0xa5, 0xe9, 0xb0, 0x3b, 0xab, 0xdb, 0xa7, 0x83, 0xfe, 0x47, + 0x0b, 0x5a, 0x5c, 0x6f, 0xad, 0x9a, 0x28, 0xc4, 0x1b, 0x0e, 0x1b, 0xb2, 0x6a, 0x12, 0x05, 0xe3, + 0xab, 0xab, 0x09, 0x63, 0x62, 0xcc, 0x21, 0x7e, 0x27, 0x6e, 0xbb, 0xcb, 0x98, 0x19, 0x13, 0xfa, + 0x3e, 0x74, 0x12, 0x76, 0x6c, 0xe8, 0xe1, 0xa3, 0x74, 0xf6, 0x46, 0x56, 0x8a, 0xaa, 0xb3, 0x30, + 0x6b, 0xa7, 0xd0, 0x96, 0x3a, 0xce, 0x23, 0xc3, 0x0f, 0xf9, 0x6d, 0x79, 0x55, 0x64, 0x08, 0xb6, + 0x9b, 0x15, 0xda, 0xa7, 0x12, 0x1a, 0xb9, 0x22, 0x4b, 0x5a, 0x8c, 0x09, 0x55, 0x8e, 0xc8, 0x68, + 0x96, 0xf2, 0x04, 0xbb, 0x49, 0x76, 0x11, 0x4a, 0xca, 0xfe, 0x8b, 0x05, 0xcd, 0xa1, 0x3f, 0x99, + 0x28, 0x78, 0x3b, 0x50, 0xf2, 0xc7, 0x72, 0x77, 0xc9, 0x1f, 0x2b, 0xb8, 0x4b, 0x79, 0xb8, 0xcb, + 0xd7, 0x81, 0xbb, 0xb2, 0x0a, 0xdc, 0x9f, 0x42, 0xdb, 0x9f, 0x86, 0x11, 0xc1, 0xfb, 0x6f, 0xdd, + 0x70, 0xca, 0xaf, 0x41, 0x16, 0x7b, 0xe6, 0xa4, 0xfd, 0x77, 0x0b, 0x5a, 0xc7, 0xd2, 0x2c, 0xa6, + 0x39, 0xba, 0x0f, 0x95, 0x33, 0x3f, 0x14, 0x4a, 0x77, 0x76, 0x6f, 0x6b, 0xb8, 0xe9, 0x6c, 0x83, + 0xe7, 0x7e, 0x38, 0x76, 0x38, 0x27, 0xba, 0x0d, 0x0d, 0x8e, 0x3b, 0x9b, 0x97, 0x1d, 0xd2, 0x7c, + 0xc2, 
0xfe, 0x06, 0x2a, 0x8c, 0x17, 0xad, 0x43, 0x79, 0x6f, 0x38, 0xec, 0xae, 0xa1, 0x5b, 0xd0, + 0xdc, 0x1b, 0x0e, 0x5f, 0x3b, 0xa3, 0xe3, 0x17, 0x7b, 0xfb, 0xa3, 0xae, 0x85, 0x00, 0x6a, 0xc3, + 0xd1, 0x8b, 0xd1, 0xd7, 0xa3, 0x6e, 0x09, 0x21, 0xe8, 0x88, 0x71, 0xb6, 0x5e, 0x66, 0xeb, 0xa7, + 0xc7, 0xc3, 0xbd, 0xaf, 0x47, 0xdd, 0x0a, 0x5b, 0x17, 0xe3, 0x6c, 0xbd, 0x6a, 0xff, 0xb3, 0x0c, + 0x2d, 0x01, 0xba, 0x8c, 0x97, 0x3e, 0xd4, 0x09, 0x8e, 0x03, 0xd7, 0xc3, 0x2a, 0xe1, 0x32, 0x9a, + 0x5d, 0x22, 0x09, 0x15, 0x3d, 0x71, 0x89, 0x2f, 0x29, 0x12, 0xdd, 0x87, 0xef, 0x8c, 0x71, 0x80, + 0x29, 0x7e, 0x8a, 0x27, 0x11, 0x6b, 0x16, 0xf9, 0x0e, 0xd9, 0xc8, 0x15, 0x2d, 0xa1, 0x2f, 0x60, + 0xdd, 0x93, 0xd8, 0x56, 0x38, 0x5a, 0xdf, 0xd3, 0xd0, 0xd2, 0x35, 0xe2, 0x84, 0x44, 0xdc, 0x51, + 0x7b, 0x58, 0x6d, 0x1c, 0xfb, 0x93, 0x89, 0x72, 0x8c, 0x20, 0xd0, 0x4b, 0x68, 0x8d, 0x31, 0x75, + 0xfd, 0x00, 0x8f, 0x39, 0xa0, 0x35, 0x1e, 0xbf, 0x3f, 0x5c, 0x2a, 0x59, 0xe3, 0x15, 0x95, 0xcc, + 0xd8, 0xce, 0x2e, 0x9a, 0xb7, 0x6e, 0xa2, 0x73, 0xf1, 0x4b, 0xb2, 0xee, 0x2c, 0x4e, 0xf7, 0x7f, + 0x01, 0x1b, 0x39, 0x61, 0x05, 0x85, 0xe8, 0x73, 0xb3, 0x10, 0x6d, 0x2d, 0x09, 0x10, 0xbd, 0xea, + 0x7c, 0x21, 0x92, 0x42, 0x02, 0x80, 0xba, 0xd0, 0x1a, 0x1e, 0x1e, 0x1c, 0xbc, 0x3e, 0x3d, 0x7a, + 0x7e, 0xf4, 0xea, 0xe7, 0x47, 0xdd, 0x35, 0xd4, 0x86, 0x06, 0x9f, 0x39, 0x7a, 0x75, 0xc4, 0x02, + 0x42, 0x91, 0x27, 0xaf, 0x5e, 0x8e, 0xba, 0x25, 0xfb, 0xb7, 0x16, 0xb4, 0xf7, 0x09, 0x76, 0x29, + 0x5e, 0x5e, 0xb5, 0x7e, 0x0c, 0x20, 0x93, 0x53, 0xdc, 0x01, 0x97, 0xe6, 0x87, 0xc6, 0xca, 0xe2, + 0x81, 0xfa, 0x33, 0x1c, 0xa5, 0x94, 0x7b, 0xda, 0x72, 0x14, 0x29, 0xda, 0x0d, 0xd1, 0xf6, 0x8b, + 0x26, 0x5d, 0x91, 0xf6, 0x2f, 0xa1, 0xa3, 0xf4, 0x91, 0x11, 0xb7, 0x98, 0xe7, 0x37, 0x55, 0xc7, + 0xfe, 0xbd, 0x05, 0x4d, 0x07, 0xbb, 0xe3, 0xd5, 0x0b, 0x88, 0x79, 0x54, 0x79, 0x75, 0xcb, 0xe7, + 0x55, 0xb5, 0xb2, 0x52, 0x55, 0xb5, 0x7f, 0x63, 0x41, 0x4b, 0xe8, 0xf6, 0x81, 0xad, 0xd6, 0x54, + 0x29, 0xaf, 0xa6, 0xca, 0xbf, 0x2c, 0x68, 
0x9f, 0xc6, 0x63, 0x2d, 0x24, 0xfe, 0x9f, 0x95, 0x56, + 0x8b, 0xa1, 0xaa, 0x19, 0x43, 0xb9, 0x1a, 0x5c, 0x2b, 0xa8, 0xc1, 0x7a, 0xa4, 0xad, 0x9b, 0x91, + 0x76, 0x08, 0x1d, 0x65, 0xa6, 0xc4, 0xdc, 0xc4, 0xd8, 0x5a, 0x3d, 0xb2, 0x7e, 0x6d, 0x41, 0x7b, + 0xc8, 0x8b, 0xd8, 0xff, 0x20, 0xb6, 0x34, 0x44, 0x2a, 0x06, 0x22, 0xf6, 0x1f, 0xd6, 0xf9, 0x57, + 0x85, 0xf8, 0x19, 0xd1, 0xbe, 0x41, 0x54, 0x67, 0x6f, 0x2d, 0xe9, 0xec, 0x4b, 0x7a, 0x67, 0xff, + 0x24, 0xeb, 0xec, 0x45, 0x5b, 0xf6, 0x03, 0xf3, 0xf1, 0x6b, 0x08, 0x2f, 0x6c, 0xef, 0xe7, 0x2d, + 0x7b, 0x65, 0x69, 0xcb, 0x5e, 0xbd, 0xba, 0x65, 0xaf, 0x15, 0xb6, 0xec, 0xac, 0xd9, 0xa3, 0x17, + 0x31, 0x96, 0xaf, 0x11, 0x3e, 0xce, 0xde, 0xd4, 0x75, 0xed, 0x4d, 0xbd, 0x09, 0xb5, 0xd8, 0x25, + 0x38, 0xa4, 0xbd, 0x86, 0xe8, 0x22, 0x04, 0xa5, 0xa5, 0x03, 0xac, 0xd6, 0xef, 0x7c, 0x03, 0x1b, + 0xe2, 0xc2, 0xd5, 0x1b, 0xe1, 0x26, 0x87, 0x66, 0xf7, 0x32, 0x68, 0x0e, 0x17, 0x37, 0x09, 0x94, + 0xf2, 0xc2, 0xa4, 0x87, 0x28, 0xf3, 0x50, 0x4b, 0x85, 0x28, 0x27, 0xd1, 0x57, 0xd0, 0x50, 0x2f, + 0xbd, 0xa4, 0xd7, 0x2e, 0xfa, 0x66, 0x32, 0xcf, 0x3c, 0x56, 0xcc, 0xf2, 0x9b, 0x29, 0xdb, 0xcc, + 0xce, 0x70, 0x03, 0xdf, 0x4d, 0x70, 0xd2, 0xeb, 0x88, 0xab, 0x59, 0x92, 0xc8, 0x66, 0x77, 0xa2, + 0x66, 0xda, 0x2d, 0xbe, 0x6c, 0xcc, 0x15, 0xbe, 0xd8, 0xba, 0xc5, 0x2f, 0x36, 0xf6, 0xa6, 0xca, + 0xee, 0xaa, 0xab, 0xba, 0xf4, 0x9b, 0x3f, 0x71, 0xfa, 0xe7, 0xb0, 0x59, 0x8c, 0x70, 0x81, 0x94, + 0x03, 0xf3, 0x5a, 0xbd, 0x7f, 0x05, 0x84, 0x39, 0xdd, 0xf5, 0x73, 0x1f, 0x43, 0xc7, 0x44, 0xf9, + 0x5a, 0x0f, 0xb3, 0xf7, 0x25, 0xfe, 0x65, 0xa6, 0x8e, 0x94, 0x75, 0x27, 0x7f, 0xe5, 0x7e, 0xce, + 0x53, 0x93, 0xe2, 0xab, 0x0a, 0xbd, 0xe0, 0x42, 0x2e, 0x6c, 0xf0, 0x41, 0xc1, 0xd7, 0xc3, 0x83, + 0x62, 0x63, 0x65, 0x87, 0x73, 0xb2, 0xb8, 0x4b, 0x06, 0x69, 0x4e, 0xda, 0xb5, 0xdc, 0xfa, 0x0e, + 0x36, 0x8b, 0x05, 0x17, 0x60, 0xf5, 0xcc, 0xf4, 0xcd, 0x8f, 0x2e, 0x55, 0xf7, 0x0a, 0xe7, 0xd8, + 0x7f, 0xb6, 0x60, 0x8b, 0xff, 0xcb, 0xa9, 0x8f, 0xa8, 0xc3, 0xd0, 0xa7, 0x07, 
0xbc, 0xed, 0xfa, + 0x70, 0x17, 0x6a, 0x0f, 0xd6, 0xc5, 0x8b, 0x44, 0x40, 0xdc, 0x70, 0x14, 0x79, 0xed, 0x5b, 0x7f, + 0xf7, 0x4f, 0x75, 0xe8, 0x2a, 0x55, 0x55, 0x54, 0xb1, 0xa4, 0xcf, 0x7e, 0xa7, 0xd1, 0x27, 0x1a, + 0x1e, 0x8b, 0x3f, 0xdc, 0xfd, 0xdb, 0xc5, 0x8b, 0x02, 0x2c, 0x7b, 0x0d, 0x3d, 0x85, 0x26, 0x7f, + 0x75, 0x89, 0x1c, 0x43, 0xb9, 0x77, 0x9a, 0x92, 0xd3, 0xcb, 0x2f, 0x64, 0x32, 0x9e, 0x00, 0xf0, + 0xfe, 0x52, 0xd6, 0xf6, 0x5c, 0xab, 0x2c, 0x24, 0x6c, 0x2d, 0x69, 0xa1, 0xed, 0x35, 0x66, 0x4e, + 0xf6, 0x67, 0x6a, 0x98, 0xb3, 0xf8, 0x49, 0x6e, 0x98, 0x93, 0xfb, 0x57, 0xe6, 0xaa, 0xd4, 0xc4, + 0x2f, 0x21, 0xd2, 0x15, 0x36, 0xbe, 0x45, 0xfb, 0x1f, 0x17, 0xac, 0x64, 0x02, 0x9e, 0x41, 0xeb, + 0x84, 0x12, 0xec, 0xce, 0xfe, 0x2b, 0x31, 0xf7, 0x2d, 0xf4, 0x10, 0x2a, 0xfb, 0x6e, 0x10, 0x18, + 0x70, 0x68, 0x5f, 0x3b, 0x06, 0x1c, 0xfa, 0x0f, 0x82, 0xbd, 0x86, 0x1e, 0x43, 0x95, 0x43, 0x7c, + 0x33, 0x6f, 0x3c, 0x84, 0x0a, 0x7f, 0x79, 0xdc, 0xc0, 0x0f, 0x4f, 0xa0, 0x26, 0x1a, 0x6b, 0xc3, + 0x6c, 0xa3, 0xf7, 0x37, 0xcc, 0x36, 0xbb, 0x70, 0x71, 0x36, 0xeb, 0x50, 0x8d, 0xb3, 0xb5, 0x76, + 0xda, 0x38, 0x5b, 0x6f, 0x65, 0xc5, 0xd9, 0xa2, 0xd5, 0x32, 0xce, 0x36, 0x9a, 0x4c, 0xe3, 0x6c, + 0xb3, 0x2f, 0xe3, 0xa8, 0xd5, 0x44, 0x7f, 0x65, 0x08, 0x30, 0x5a, 0xae, 0xfe, 0x66, 0x2e, 0xdb, + 0x46, 0xb3, 0x98, 0x5e, 0x64, 0x21, 0x28, 0x6a, 0xc9, 0x62, 0x08, 0x1a, 0xd5, 0x7f, 0x31, 0x04, + 0xcd, 0xf2, 0x63, 0xaf, 0xa1, 0x47, 0x50, 0xdb, 0x77, 0x43, 0x0f, 0x33, 0xd7, 0x17, 0x9e, 0x76, + 0x89, 0x16, 0x5f, 0x42, 0xfb, 0x19, 0xa6, 0xc7, 0xfc, 0xeb, 0xf5, 0x30, 0x9c, 0x44, 0x4b, 0x45, + 0x7c, 0x57, 0x7f, 0xf6, 0x65, 0xec, 0xf6, 0xda, 0x9b, 0x1a, 0x67, 0x7c, 0xf0, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9c, 0xb7, 0x22, 0x0e, 0x52, 0x1b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -1819,6 +2145,8 @@ type ResourceProviderClient interface { // StreamInvoke dynamically executes a built-in function in the provider, which returns a stream // of responses. StreamInvoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (ResourceProvider_StreamInvokeClient, error) + // Call dynamically executes a method in the provider associated with a component resource. + Call(ctx context.Context, in *CallRequest, opts ...grpc.CallOption) (*CallResponse, error) // Check validates that the given property bag is valid for a resource of the given type and returns the inputs // that should be passed to successive calls to Diff, Create, or Update for this resource. As a rule, the provider // inputs returned by a call to Check should preserve the original representation of the properties as present in @@ -1930,6 +2258,15 @@ func (x *resourceProviderStreamInvokeClient) Recv() (*InvokeResponse, error) { return m, nil } +func (c *resourceProviderClient) Call(ctx context.Context, in *CallRequest, opts ...grpc.CallOption) (*CallResponse, error) { + out := new(CallResponse) + err := c.cc.Invoke(ctx, "/pulumirpc.ResourceProvider/Call", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *resourceProviderClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { out := new(CheckResponse) err := c.cc.Invoke(ctx, "/pulumirpc.ResourceProvider/Check", in, out, opts...) @@ -2026,6 +2363,8 @@ type ResourceProviderServer interface { // StreamInvoke dynamically executes a built-in function in the provider, which returns a stream // of responses. StreamInvoke(*InvokeRequest, ResourceProvider_StreamInvokeServer) error + // Call dynamically executes a method in the provider associated with a component resource. 
+ Call(context.Context, *CallRequest) (*CallResponse, error) // Check validates that the given property bag is valid for a resource of the given type and returns the inputs // that should be passed to successive calls to Diff, Create, or Update for this resource. As a rule, the provider // inputs returned by a call to Check should preserve the original representation of the properties as present in @@ -2074,6 +2413,9 @@ func (*UnimplementedResourceProviderServer) Invoke(ctx context.Context, req *Inv func (*UnimplementedResourceProviderServer) StreamInvoke(req *InvokeRequest, srv ResourceProvider_StreamInvokeServer) error { return status.Errorf(codes.Unimplemented, "method StreamInvoke not implemented") } +func (*UnimplementedResourceProviderServer) Call(ctx context.Context, req *CallRequest) (*CallResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Call not implemented") +} func (*UnimplementedResourceProviderServer) Check(ctx context.Context, req *CheckRequest) (*CheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } @@ -2217,6 +2559,24 @@ func (x *resourceProviderStreamInvokeServer) Send(m *InvokeResponse) error { return x.ServerStream.SendMsg(m) } +func _ResourceProvider_Call_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceProviderServer).Call(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pulumirpc.ResourceProvider/Call", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceProviderServer).Call(ctx, req.(*CallRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _ResourceProvider_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CheckRequest) if err := dec(in); err != nil { @@ -2403,6 +2763,10 @@ var _ResourceProvider_serviceDesc = grpc.ServiceDesc{ MethodName: "Invoke", Handler: _ResourceProvider_Invoke_Handler, }, + { + MethodName: "Call", + Handler: _ResourceProvider_Call_Handler, + }, { MethodName: "Check", Handler: _ResourceProvider_Check_Handler, diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go index 6056ddb..610e55c 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go @@ -120,6 +120,7 @@ type ReadResourceRequest struct { AdditionalSecretOutputs []string `protobuf:"bytes,10,rep,name=additionalSecretOutputs,proto3" json:"additionalSecretOutputs,omitempty"` Aliases []string `protobuf:"bytes,11,rep,name=aliases,proto3" json:"aliases,omitempty"` AcceptResources bool `protobuf:"varint,12,opt,name=acceptResources,proto3" json:"acceptResources,omitempty"` + PluginDownloadURL string `protobuf:"bytes,13,opt,name=pluginDownloadURL,proto3" json:"pluginDownloadURL,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -234,6 +235,13 @@ func (m *ReadResourceRequest) GetAcceptResources() bool { return false } +func (m *ReadResourceRequest) GetPluginDownloadURL() string { + if m != nil { + return m.PluginDownloadURL + } + return "" +} + // ReadResourceResponse contains the result of reading a resource's state. 
type ReadResourceResponse struct { Urn string `protobuf:"bytes,1,opt,name=urn,proto3" json:"urn,omitempty"` @@ -306,6 +314,8 @@ type RegisterResourceRequest struct { Remote bool `protobuf:"varint,20,opt,name=remote,proto3" json:"remote,omitempty"` AcceptResources bool `protobuf:"varint,21,opt,name=acceptResources,proto3" json:"acceptResources,omitempty"` Providers map[string]string `protobuf:"bytes,22,rep,name=providers,proto3" json:"providers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ReplaceOnChanges []string `protobuf:"bytes,23,rep,name=replaceOnChanges,proto3" json:"replaceOnChanges,omitempty"` + PluginDownloadURL string `protobuf:"bytes,24,opt,name=pluginDownloadURL,proto3" json:"pluginDownloadURL,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -490,6 +500,20 @@ func (m *RegisterResourceRequest) GetProviders() map[string]string { return nil } +func (m *RegisterResourceRequest) GetReplaceOnChanges() []string { + if m != nil { + return m.ReplaceOnChanges + } + return nil +} + +func (m *RegisterResourceRequest) GetPluginDownloadURL() string { + if m != nil { + return m.PluginDownloadURL + } + return "" +} + // PropertyDependencies describes the resources that a particular property depends on. 
type RegisterResourceRequest_PropertyDependencies struct { Urns []string `protobuf:"bytes,1,rep,name=urns,proto3" json:"urns,omitempty"` @@ -784,69 +808,72 @@ func init() { func init() { proto.RegisterFile("resource.proto", fileDescriptor_d1b72f771c35e3b8) } var fileDescriptor_d1b72f771c35e3b8 = []byte{ - // 977 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4f, 0x73, 0xdb, 0x44, - 0x14, 0x8f, 0xed, 0xd4, 0xb1, 0x5f, 0x52, 0x27, 0x6c, 0x5c, 0x67, 0x2b, 0x98, 0x10, 0x04, 0x07, - 0xc3, 0xc1, 0x69, 0x02, 0x33, 0x0d, 0x4c, 0x81, 0x19, 0xda, 0xc2, 0xf4, 0x50, 0x5a, 0x14, 0x86, - 0x01, 0x66, 0x60, 0x66, 0x23, 0xbd, 0xb8, 0x22, 0xb6, 0x56, 0xdd, 0x5d, 0x65, 0xc6, 0x37, 0x38, - 0xf2, 0x1d, 0xb8, 0xf0, 0x55, 0xf8, 0x64, 0xcc, 0xee, 0x6a, 0x5d, 0xc9, 0x92, 0x13, 0x27, 0xbd, - 0xe9, 0xfd, 0xd7, 0xfe, 0xde, 0xef, 0xbd, 0x5d, 0xe8, 0x09, 0x94, 0x3c, 0x13, 0x21, 0x8e, 0x52, - 0xc1, 0x15, 0x27, 0xdd, 0x34, 0x9b, 0x64, 0xd3, 0x58, 0xa4, 0xa1, 0xf7, 0xee, 0x98, 0xf3, 0xf1, - 0x04, 0x0f, 0x8d, 0xe1, 0x2c, 0x3b, 0x3f, 0xc4, 0x69, 0xaa, 0x66, 0xd6, 0xcf, 0x7b, 0x6f, 0xd1, - 0x28, 0x95, 0xc8, 0x42, 0x95, 0x5b, 0x7b, 0xa9, 0xe0, 0x97, 0x71, 0x84, 0xc2, 0xca, 0xfe, 0x10, - 0x06, 0xa7, 0x59, 0x9a, 0x72, 0xa1, 0xe4, 0xb7, 0xc8, 0x54, 0x26, 0x30, 0xc0, 0xd7, 0x19, 0x4a, - 0x45, 0x7a, 0xd0, 0x8c, 0x23, 0xda, 0x38, 0x68, 0x0c, 0xbb, 0x41, 0x33, 0x8e, 0xfc, 0xcf, 0x61, - 0xaf, 0xe2, 0x29, 0x53, 0x9e, 0x48, 0x24, 0xfb, 0x00, 0xaf, 0x98, 0xcc, 0xad, 0x26, 0xa4, 0x13, - 0x14, 0x34, 0xfe, 0x3f, 0x2d, 0xd8, 0x0d, 0x90, 0x45, 0x41, 0x7e, 0xa2, 0x25, 0x25, 0x08, 0x81, - 0x75, 0x35, 0x4b, 0x91, 0x36, 0x8d, 0xc6, 0x7c, 0x6b, 0x5d, 0xc2, 0xa6, 0x48, 0x5b, 0x56, 0xa7, - 0xbf, 0xc9, 0x00, 0xda, 0x29, 0x13, 0x98, 0x28, 0xba, 0x6e, 0xb4, 0xb9, 0x44, 0x1e, 0x02, 0xa4, - 0x82, 0xa7, 0x28, 0x54, 0x8c, 0x92, 0xde, 0x39, 0x68, 0x0c, 0x37, 0x8f, 0xf7, 0x46, 0x16, 0x8f, - 0x91, 0xc3, 0x63, 0x74, 0x6a, 0xf0, 0x08, 0x0a, 0xae, 0xc4, 0x87, 0xad, 
0x08, 0x53, 0x4c, 0x22, - 0x4c, 0x42, 0x1d, 0xda, 0x3e, 0x68, 0x0d, 0xbb, 0x41, 0x49, 0x47, 0x3c, 0xe8, 0x38, 0xec, 0xe8, - 0x86, 0x29, 0x3b, 0x97, 0x09, 0x85, 0x8d, 0x4b, 0x14, 0x32, 0xe6, 0x09, 0xed, 0x18, 0x93, 0x13, - 0xc9, 0x47, 0x70, 0x97, 0x85, 0x21, 0xa6, 0xea, 0x14, 0x43, 0x81, 0x4a, 0xd2, 0xae, 0x41, 0xa7, - 0xac, 0x24, 0x27, 0xb0, 0xc7, 0xa2, 0x28, 0x56, 0x31, 0x4f, 0xd8, 0xc4, 0x2a, 0x5f, 0x64, 0x2a, - 0xcd, 0x94, 0xa4, 0x60, 0x7e, 0x65, 0x99, 0x59, 0x57, 0x66, 0x93, 0x98, 0x49, 0x94, 0x74, 0xd3, - 0x78, 0x3a, 0x91, 0x0c, 0x61, 0xdb, 0x16, 0x71, 0xa8, 0x4b, 0xba, 0x65, 0x6a, 0x2f, 0xaa, 0x7d, - 0x06, 0xfd, 0x72, 0x77, 0xf2, 0xb6, 0xee, 0x40, 0x2b, 0x13, 0x49, 0xde, 0x1f, 0xfd, 0xb9, 0x00, - 0x70, 0x73, 0x65, 0x80, 0xfd, 0x7f, 0x01, 0xf6, 0x02, 0x1c, 0xc7, 0x52, 0xa1, 0x58, 0x64, 0x81, - 0xeb, 0x7a, 0xa3, 0xa6, 0xeb, 0xcd, 0xda, 0xae, 0xb7, 0x4a, 0x5d, 0x1f, 0x40, 0x3b, 0xcc, 0xa4, - 0xe2, 0x53, 0xc3, 0x86, 0x4e, 0x90, 0x4b, 0xe4, 0x10, 0xda, 0xfc, 0xec, 0x0f, 0x0c, 0xd5, 0x75, - 0x4c, 0xc8, 0xdd, 0x34, 0x96, 0xda, 0xa4, 0x23, 0xda, 0x26, 0x93, 0x13, 0x2b, 0xfc, 0xd8, 0xb8, - 0x86, 0x1f, 0x9d, 0x05, 0x7e, 0xa4, 0xd0, 0xcf, 0xc1, 0x98, 0x3d, 0x29, 0xe6, 0xe9, 0x1e, 0xb4, - 0x86, 0x9b, 0xc7, 0x8f, 0x46, 0xf3, 0xd1, 0x1e, 0x2d, 0x01, 0x69, 0xf4, 0xb2, 0x26, 0xfc, 0x69, - 0xa2, 0xc4, 0x2c, 0xa8, 0xcd, 0x4c, 0x1e, 0xc0, 0x6e, 0x84, 0x13, 0x54, 0xf8, 0x0d, 0x9e, 0x73, - 0x3d, 0xaa, 0xe9, 0x84, 0x85, 0x48, 0xc1, 0x9c, 0xab, 0xce, 0x54, 0xe4, 0xf0, 0x66, 0x85, 0xc3, - 0xf1, 0x38, 0xe1, 0x02, 0x1f, 0xbf, 0x62, 0xc9, 0xd8, 0xf0, 0x48, 0x1f, 0xbf, 0xac, 0xac, 0x32, - 0xfd, 0xee, 0x0d, 0x99, 0xde, 0x5b, 0x99, 0xe9, 0xdb, 0x65, 0xa6, 0x7b, 0xd0, 0x89, 0xa7, 0x7a, - 0xd1, 0x3c, 0x8b, 0xe8, 0x8e, 0x45, 0xde, 0xc9, 0xe4, 0x17, 0xe8, 0x59, 0x3a, 0xfc, 0x18, 0x4f, - 0x91, 0xeb, 0x32, 0xef, 0x18, 0x32, 0x1c, 0xad, 0x80, 0xf9, 0xe3, 0x52, 0x60, 0xb0, 0x90, 0x88, - 0x7c, 0x05, 0x5e, 0x0d, 0x8e, 0x4f, 0xf0, 0x3c, 0x4e, 0x30, 0xa2, 0xc4, 0x9c, 0xfe, 0x0a, 0x0f, - 0xf2, 0x19, 
0xdc, 0x93, 0xf9, 0x42, 0x7d, 0xc9, 0x84, 0x8a, 0xd9, 0xe4, 0x27, 0x36, 0xc9, 0x50, - 0xd2, 0x5d, 0x13, 0x5a, 0x6f, 0xd4, 0x6c, 0x17, 0x38, 0xe5, 0x0a, 0x69, 0xdf, 0xb2, 0xdd, 0x4a, - 0x75, 0xe3, 0x7e, 0xaf, 0x76, 0xdc, 0xc9, 0x0b, 0xe8, 0x3a, 0x62, 0x4a, 0x3a, 0x30, 0x0c, 0x3c, - 0x5a, 0x8d, 0x81, 0x36, 0xc6, 0xd2, 0xee, 0x4d, 0x0e, 0xef, 0x13, 0xe8, 0xd7, 0xd1, 0x53, 0x0f, - 0x71, 0x26, 0x12, 0x49, 0x1b, 0xa6, 0x5d, 0xe6, 0xdb, 0xfb, 0x19, 0x7a, 0x65, 0x58, 0xcd, 0xf8, - 0x0a, 0x64, 0xca, 0x2d, 0x80, 0x5c, 0xd2, 0xfa, 0x2c, 0x8d, 0xb4, 0xde, 0x2e, 0x81, 0x5c, 0xd2, - 0x7a, 0x0b, 0xaa, 0x5b, 0x03, 0x56, 0xf2, 0xfe, 0x6c, 0xc0, 0xfd, 0xa5, 0x53, 0xa2, 0x77, 0xd9, - 0x05, 0xce, 0xdc, 0x2e, 0xbb, 0xc0, 0x19, 0x79, 0x0e, 0x77, 0x2e, 0x35, 0xa4, 0xf9, 0x1a, 0x7b, - 0x78, 0xcb, 0x21, 0x0c, 0x6c, 0x96, 0x2f, 0x9a, 0x27, 0x0d, 0xef, 0x11, 0xf4, 0xca, 0x28, 0xd5, - 0x94, 0xed, 0x17, 0xcb, 0x76, 0x0b, 0xd1, 0xfe, 0x7f, 0x2d, 0xa0, 0xd5, 0xca, 0x4b, 0x77, 0xb1, - 0xbd, 0x3c, 0x9b, 0xf3, 0xcb, 0xf3, 0xcd, 0xba, 0x6b, 0xad, 0xb6, 0xee, 0x06, 0xd0, 0x96, 0x8a, - 0x9d, 0x4d, 0xd0, 0xed, 0x4d, 0x2b, 0xe9, 0x41, 0xb3, 0x5f, 0xfa, 0x0a, 0x35, 0x83, 0x96, 0x8b, - 0xe4, 0xf5, 0x92, 0x35, 0xd6, 0x36, 0x24, 0xfa, 0xf2, 0x4a, 0x04, 0xed, 0x39, 0x6e, 0xba, 0xc7, - 0x6e, 0xc4, 0xad, 0xbf, 0x6e, 0xc8, 0x80, 0xef, 0xcb, 0x0c, 0x38, 0xb9, 0xed, 0xff, 0x17, 0x9b, - 0x88, 0xb0, 0xbf, 0x18, 0x9b, 0x2f, 0x30, 0x77, 0xdd, 0x55, 0x3b, 0x79, 0x04, 0x1b, 0x3c, 0xdf, - 0x81, 0xd7, 0x5c, 0xa9, 0xce, 0xef, 0xf8, 0xef, 0x75, 0xd8, 0x76, 0xf9, 0x9f, 0xf3, 0x24, 0x56, - 0x5c, 0x90, 0x5f, 0x61, 0x7b, 0xe1, 0x81, 0x46, 0x3e, 0x28, 0x1c, 0xa9, 0xfe, 0x99, 0xe7, 0xf9, - 0x57, 0xb9, 0xd8, 0x43, 0xfb, 0x6b, 0xe4, 0x6b, 0x68, 0x3f, 0x4b, 0x2e, 0xf9, 0x05, 0x12, 0x5a, - 0xf0, 0xb7, 0x2a, 0x97, 0xe9, 0x7e, 0x8d, 0x65, 0x9e, 0xe0, 0x3b, 0xd8, 0x3a, 0x55, 0x02, 0xd9, - 0xf4, 0xad, 0xd2, 0x3c, 0x68, 0x90, 0x1f, 0x60, 0xab, 0xf8, 0x58, 0x21, 0xfb, 0xa5, 0xae, 0x55, - 0xde, 0x98, 0xde, 0xfb, 0x4b, 0xed, 0xf3, 0x7f, 
0xfb, 0x0d, 0x76, 0x16, 0x7b, 0x46, 0xfc, 0xeb, - 0xd7, 0x81, 0xf7, 0xe1, 0x0a, 0x84, 0xf1, 0xd7, 0xc8, 0xef, 0xd5, 0xa7, 0x8f, 0xbb, 0xd3, 0x3e, - 0xbe, 0x22, 0x43, 0x99, 0x36, 0xde, 0xa0, 0xc2, 0x89, 0xa7, 0xfa, 0xd1, 0xef, 0xaf, 0x9d, 0xb5, - 0x8d, 0xe6, 0xd3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xe7, 0x38, 0x56, 0x31, 0x0c, 0x00, - 0x00, + // 1038 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x5f, 0x6f, 0x1b, 0x45, + 0x10, 0x8f, 0xed, 0xd4, 0xb1, 0x27, 0x89, 0x93, 0x6e, 0x5c, 0x7b, 0x7b, 0xa0, 0x10, 0x0e, 0x1e, + 0x4c, 0x85, 0x9c, 0x26, 0x20, 0x35, 0x45, 0x05, 0x24, 0x92, 0x82, 0x2a, 0x51, 0x52, 0x2e, 0x80, + 0x00, 0x09, 0xa4, 0x8d, 0x6f, 0xe2, 0x1e, 0x39, 0xdf, 0x5e, 0x77, 0xf7, 0x82, 0xfc, 0x06, 0x5f, + 0x0d, 0xf1, 0xc4, 0x57, 0xe2, 0x05, 0xed, 0xee, 0x9d, 0x7b, 0xe7, 0x3b, 0x27, 0x4e, 0x79, 0xbb, + 0xf9, 0xef, 0x9d, 0xf9, 0xcd, 0x6f, 0xd7, 0xd0, 0x11, 0x28, 0x79, 0x22, 0x46, 0x38, 0x8c, 0x05, + 0x57, 0x9c, 0xb4, 0xe3, 0x24, 0x4c, 0x26, 0x81, 0x88, 0x47, 0xce, 0x5b, 0x63, 0xce, 0xc7, 0x21, + 0xee, 0x1b, 0xc3, 0x79, 0x72, 0xb1, 0x8f, 0x93, 0x58, 0x4d, 0xad, 0x9f, 0xf3, 0xf6, 0xbc, 0x51, + 0x2a, 0x91, 0x8c, 0x54, 0x6a, 0xed, 0xc4, 0x82, 0x5f, 0x05, 0x3e, 0x0a, 0x2b, 0xbb, 0x03, 0xe8, + 0x9d, 0x25, 0x71, 0xcc, 0x85, 0x92, 0x5f, 0x22, 0x53, 0x89, 0x40, 0x0f, 0x5f, 0x25, 0x28, 0x15, + 0xe9, 0x40, 0x3d, 0xf0, 0x69, 0x6d, 0xaf, 0x36, 0x68, 0x7b, 0xf5, 0xc0, 0x77, 0x1f, 0x43, 0xbf, + 0xe4, 0x29, 0x63, 0x1e, 0x49, 0x24, 0xbb, 0x00, 0x2f, 0x99, 0x4c, 0xad, 0x26, 0xa4, 0xe5, 0xe5, + 0x34, 0xee, 0x3f, 0x0d, 0xd8, 0xf1, 0x90, 0xf9, 0x5e, 0x7a, 0xa2, 0x05, 0x25, 0x08, 0x81, 0x55, + 0x35, 0x8d, 0x91, 0xd6, 0x8d, 0xc6, 0x7c, 0x6b, 0x5d, 0xc4, 0x26, 0x48, 0x1b, 0x56, 0xa7, 0xbf, + 0x49, 0x0f, 0x9a, 0x31, 0x13, 0x18, 0x29, 0xba, 0x6a, 0xb4, 0xa9, 0x44, 0x1e, 0x01, 0xc4, 0x82, + 0xc7, 0x28, 0x54, 0x80, 0x92, 0xde, 0xd9, 0xab, 0x0d, 0xd6, 0x0f, 0xfb, 0x43, 0xdb, 0x8f, 0x61, + 0xd6, 0x8f, 0xe1, 0x99, 0xe9, 
0x87, 0x97, 0x73, 0x25, 0x2e, 0x6c, 0xf8, 0x18, 0x63, 0xe4, 0x63, + 0x34, 0xd2, 0xa1, 0xcd, 0xbd, 0xc6, 0xa0, 0xed, 0x15, 0x74, 0xc4, 0x81, 0x56, 0xd6, 0x3b, 0xba, + 0x66, 0xca, 0xce, 0x64, 0x42, 0x61, 0xed, 0x0a, 0x85, 0x0c, 0x78, 0x44, 0x5b, 0xc6, 0x94, 0x89, + 0xe4, 0x7d, 0xd8, 0x64, 0xa3, 0x11, 0xc6, 0xea, 0x0c, 0x47, 0x02, 0x95, 0xa4, 0x6d, 0xd3, 0x9d, + 0xa2, 0x92, 0x1c, 0x41, 0x9f, 0xf9, 0x7e, 0xa0, 0x02, 0x1e, 0xb1, 0xd0, 0x2a, 0x4f, 0x13, 0x15, + 0x27, 0x4a, 0x52, 0x30, 0x3f, 0x65, 0x91, 0x59, 0x57, 0x66, 0x61, 0xc0, 0x24, 0x4a, 0xba, 0x6e, + 0x3c, 0x33, 0x91, 0x0c, 0x60, 0xcb, 0x16, 0xc9, 0xba, 0x2e, 0xe9, 0x86, 0xa9, 0x3d, 0xaf, 0x26, + 0x1f, 0xc2, 0xdd, 0x38, 0x4c, 0xc6, 0x41, 0x74, 0xc2, 0x7f, 0x8f, 0x42, 0xce, 0xfc, 0xef, 0xbd, + 0xaf, 0xe9, 0xa6, 0x39, 0x47, 0xd9, 0xe0, 0x32, 0xe8, 0x16, 0x67, 0x99, 0x82, 0x60, 0x1b, 0x1a, + 0x89, 0x88, 0xd2, 0x69, 0xea, 0xcf, 0xb9, 0x71, 0xd4, 0x97, 0x1e, 0x87, 0xfb, 0x2f, 0x40, 0xdf, + 0xc3, 0x71, 0x20, 0x15, 0x8a, 0x79, 0xcc, 0x64, 0x18, 0xa9, 0x55, 0x60, 0xa4, 0x5e, 0x89, 0x91, + 0x46, 0x01, 0x23, 0x3d, 0x68, 0x8e, 0x12, 0xa9, 0xf8, 0xc4, 0x60, 0xa7, 0xe5, 0xa5, 0x12, 0xd9, + 0x87, 0x26, 0x3f, 0xff, 0x0d, 0x47, 0xea, 0x26, 0xdc, 0xa4, 0x6e, 0xba, 0xf3, 0xda, 0xa4, 0x23, + 0x9a, 0x26, 0x53, 0x26, 0x96, 0xd0, 0xb4, 0x76, 0x03, 0x9a, 0x5a, 0x73, 0x68, 0x8a, 0xa1, 0x9b, + 0x36, 0x63, 0x7a, 0x92, 0xcf, 0xd3, 0xde, 0x6b, 0x0c, 0xd6, 0x0f, 0x9f, 0x0c, 0x67, 0x44, 0x30, + 0x5c, 0xd0, 0xa4, 0xe1, 0x8b, 0x8a, 0xf0, 0xa7, 0x91, 0x12, 0x53, 0xaf, 0x32, 0x33, 0x79, 0x08, + 0x3b, 0x3e, 0x86, 0xa8, 0xf0, 0x0b, 0xbc, 0xe0, 0x7a, 0xb1, 0xe3, 0x90, 0x8d, 0x90, 0x82, 0x39, + 0x57, 0x95, 0x29, 0x8f, 0xf8, 0xf5, 0x12, 0xe2, 0x83, 0x71, 0xc4, 0x05, 0x1e, 0xbf, 0x64, 0xd1, + 0xd8, 0xa0, 0x4e, 0x1f, 0xbf, 0xa8, 0x2c, 0xef, 0xc5, 0xe6, 0x2d, 0xf7, 0xa2, 0xb3, 0xf4, 0x5e, + 0x6c, 0x15, 0xf7, 0xc2, 0x81, 0x56, 0x30, 0xd1, 0xb4, 0xf4, 0xcc, 0xa7, 0xdb, 0xb6, 0xf3, 0x99, + 0x4c, 0x7e, 0x82, 0x8e, 0x85, 0xc3, 0x77, 0xc1, 0x04, 0xb9, 0x2e, 
0x73, 0xd7, 0x80, 0xe1, 0x60, + 0x89, 0x9e, 0x1f, 0x17, 0x02, 0xbd, 0xb9, 0x44, 0xe4, 0x33, 0x70, 0x2a, 0xfa, 0x78, 0x82, 0x17, + 0x41, 0x84, 0x3e, 0x25, 0xe6, 0xf4, 0xd7, 0x78, 0x90, 0x8f, 0xe1, 0x9e, 0x4c, 0xe9, 0xf7, 0x05, + 0x13, 0x2a, 0x60, 0xe1, 0x0f, 0x2c, 0x4c, 0x50, 0xd2, 0x1d, 0x13, 0x5a, 0x6d, 0xd4, 0x68, 0x17, + 0x38, 0xe1, 0x0a, 0x69, 0xd7, 0xa2, 0xdd, 0x4a, 0x55, 0xe4, 0x70, 0xaf, 0x9a, 0x1c, 0x4e, 0xa1, + 0x9d, 0x01, 0x53, 0xd2, 0x9e, 0x41, 0xe0, 0xc1, 0x72, 0x08, 0xb4, 0x31, 0x16, 0x76, 0xaf, 0x73, + 0x90, 0x07, 0xb0, 0x2d, 0xec, 0xd1, 0x4e, 0xa3, 0x0c, 0x22, 0x7d, 0x33, 0xa2, 0x92, 0xbe, 0x9a, + 0x99, 0xe8, 0x02, 0x66, 0x72, 0x1e, 0x40, 0xb7, 0x0a, 0xf8, 0x9a, 0x1e, 0x12, 0x11, 0x49, 0x5a, + 0x33, 0x55, 0xcc, 0xb7, 0xf3, 0x23, 0x74, 0x8a, 0x03, 0x33, 0xc4, 0x20, 0x90, 0xa9, 0x8c, 0x5a, + 0x52, 0x49, 0xeb, 0x93, 0xd8, 0xd7, 0x7a, 0x4b, 0x2f, 0xa9, 0xa4, 0xf5, 0x76, 0x5c, 0x19, 0xc1, + 0x58, 0xc9, 0xf9, 0xa3, 0x06, 0xf7, 0x17, 0xee, 0x9f, 0x66, 0xc9, 0x4b, 0x9c, 0x66, 0x2c, 0x79, + 0x89, 0x53, 0xf2, 0x1c, 0xee, 0x5c, 0xe9, 0x61, 0xa5, 0x04, 0xf9, 0xe8, 0x0d, 0xd7, 0xdb, 0xb3, + 0x59, 0x3e, 0xa9, 0x1f, 0xd5, 0x9c, 0x27, 0xd0, 0x29, 0xf6, 0xbf, 0xa2, 0x6c, 0x37, 0x5f, 0xb6, + 0x9d, 0x8b, 0x76, 0xff, 0x6a, 0x00, 0x2d, 0x57, 0x5e, 0xc8, 0xf2, 0xf6, 0x12, 0xaf, 0xcf, 0x2e, + 0xf1, 0xd7, 0x44, 0xda, 0x58, 0x8e, 0x48, 0x7b, 0xd0, 0x94, 0x8a, 0x9d, 0x87, 0x98, 0x31, 0xb2, + 0x95, 0xf4, 0x0a, 0xdb, 0x2f, 0x7d, 0x95, 0x9b, 0x15, 0x4e, 0x45, 0xf2, 0x6a, 0x01, 0x41, 0x36, + 0x0d, 0x3c, 0x3f, 0xbd, 0xb6, 0x83, 0xf6, 0x1c, 0xb7, 0x65, 0xc8, 0x5b, 0x61, 0xeb, 0xcf, 0x5b, + 0x22, 0xe0, 0x9b, 0x22, 0x02, 0x8e, 0xde, 0xf4, 0xf7, 0xe7, 0x87, 0x88, 0xb0, 0x3b, 0x1f, 0x9b, + 0x52, 0x63, 0x76, 0x91, 0x96, 0x27, 0x79, 0x00, 0x6b, 0x3c, 0x65, 0xd7, 0x1b, 0x2e, 0xeb, 0xcc, + 0xef, 0xf0, 0xef, 0x55, 0xd8, 0xca, 0xf2, 0x3f, 0xe7, 0x51, 0xa0, 0xb8, 0x20, 0x3f, 0xc3, 0xd6, + 0xdc, 0x43, 0x91, 0xbc, 0x9b, 0x3b, 0x52, 0xf5, 0x73, 0xd3, 0x71, 0xaf, 0x73, 0xb1, 0x87, 0x76, + 0x57, 
0xc8, 0xe7, 0xd0, 0x7c, 0x16, 0x5d, 0xf1, 0x4b, 0x24, 0x34, 0xe7, 0x6f, 0x55, 0x59, 0xa6, + 0xfb, 0x15, 0x96, 0x59, 0x82, 0xaf, 0x60, 0xe3, 0x4c, 0x09, 0x64, 0x93, 0xff, 0x95, 0xe6, 0x61, + 0x8d, 0x3c, 0x86, 0xd5, 0x63, 0x16, 0x86, 0xa4, 0x97, 0x73, 0xd3, 0x8a, 0x2c, 0xbc, 0x5f, 0xd2, + 0xcf, 0x7e, 0xc3, 0xb7, 0xb0, 0x91, 0x7f, 0x41, 0x91, 0xdd, 0xc2, 0xc0, 0x4b, 0xcf, 0x64, 0xe7, + 0x9d, 0x85, 0xf6, 0x59, 0xca, 0x5f, 0x60, 0x7b, 0x7e, 0xdc, 0xc4, 0xbd, 0x99, 0x49, 0x9c, 0xf7, + 0x96, 0xc0, 0x9a, 0xbb, 0x42, 0x7e, 0x2d, 0xbf, 0xc7, 0xb2, 0x8b, 0xf6, 0x83, 0x6b, 0x32, 0x14, + 0x11, 0xe7, 0xf4, 0x4a, 0x70, 0x7a, 0xaa, 0xff, 0xb7, 0xb8, 0x2b, 0xe7, 0x4d, 0xa3, 0xf9, 0xe8, + 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x49, 0xe5, 0xd6, 0xf4, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -864,6 +891,7 @@ type ResourceMonitorClient interface { SupportsFeature(ctx context.Context, in *SupportsFeatureRequest, opts ...grpc.CallOption) (*SupportsFeatureResponse, error) Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) StreamInvoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (ResourceMonitor_StreamInvokeClient, error) + Call(ctx context.Context, in *CallRequest, opts ...grpc.CallOption) (*CallResponse, error) ReadResource(ctx context.Context, in *ReadResourceRequest, opts ...grpc.CallOption) (*ReadResourceResponse, error) RegisterResource(ctx context.Context, in *RegisterResourceRequest, opts ...grpc.CallOption) (*RegisterResourceResponse, error) RegisterResourceOutputs(ctx context.Context, in *RegisterResourceOutputsRequest, opts ...grpc.CallOption) (*empty.Empty, error) @@ -927,6 +955,15 @@ func (x *resourceMonitorStreamInvokeClient) Recv() (*InvokeResponse, error) { return m, nil } +func (c *resourceMonitorClient) Call(ctx context.Context, in *CallRequest, opts ...grpc.CallOption) (*CallResponse, error) { + out := new(CallResponse) + err := c.cc.Invoke(ctx, 
"/pulumirpc.ResourceMonitor/Call", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *resourceMonitorClient) ReadResource(ctx context.Context, in *ReadResourceRequest, opts ...grpc.CallOption) (*ReadResourceResponse, error) { out := new(ReadResourceResponse) err := c.cc.Invoke(ctx, "/pulumirpc.ResourceMonitor/ReadResource", in, out, opts...) @@ -959,6 +996,7 @@ type ResourceMonitorServer interface { SupportsFeature(context.Context, *SupportsFeatureRequest) (*SupportsFeatureResponse, error) Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) StreamInvoke(*InvokeRequest, ResourceMonitor_StreamInvokeServer) error + Call(context.Context, *CallRequest) (*CallResponse, error) ReadResource(context.Context, *ReadResourceRequest) (*ReadResourceResponse, error) RegisterResource(context.Context, *RegisterResourceRequest) (*RegisterResourceResponse, error) RegisterResourceOutputs(context.Context, *RegisterResourceOutputsRequest) (*empty.Empty, error) @@ -977,6 +1015,9 @@ func (*UnimplementedResourceMonitorServer) Invoke(ctx context.Context, req *Invo func (*UnimplementedResourceMonitorServer) StreamInvoke(req *InvokeRequest, srv ResourceMonitor_StreamInvokeServer) error { return status.Errorf(codes.Unimplemented, "method StreamInvoke not implemented") } +func (*UnimplementedResourceMonitorServer) Call(ctx context.Context, req *CallRequest) (*CallResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Call not implemented") +} func (*UnimplementedResourceMonitorServer) ReadResource(ctx context.Context, req *ReadResourceRequest) (*ReadResourceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") } @@ -1048,6 +1089,24 @@ func (x *resourceMonitorStreamInvokeServer) Send(m *InvokeResponse) error { return x.ServerStream.SendMsg(m) } +func _ResourceMonitor_Call_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceMonitorServer).Call(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pulumirpc.ResourceMonitor/Call", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceMonitorServer).Call(ctx, req.(*CallRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _ResourceMonitor_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReadResourceRequest) if err := dec(in); err != nil { @@ -1114,6 +1173,10 @@ var _ResourceMonitor_serviceDesc = grpc.ServiceDesc{ MethodName: "Invoke", Handler: _ResourceMonitor_Invoke_Handler, }, + { + MethodName: "Call", + Handler: _ResourceMonitor_Call_Handler, + }, { MethodName: "ReadResource", Handler: _ResourceMonitor_ReadResource_Handler, diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/.gitignore b/vendor/github.com/pulumi/pulumi/sdk/v3/python/.gitignore index 04019cc..32f561b 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/.gitignore +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/.gitignore @@ -4,3 +4,5 @@ /env/ /*.egg-info .venv/ +venv/ +.coverage diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/.pylintrc b/vendor/github.com/pulumi/pulumi/sdk/v3/python/.pylintrc index 849b330..95dc4fa 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/.pylintrc +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/.pylintrc @@ -373,6 +373,9 @@ ignore-docstrings=yes # Ignore imports when computing similarities. ignore-imports=no +# Ignore function signatures when computing similarities. +ignore-signatures=yes + # Minimum lines number of a similarity. 
min-similarity-lines=4 diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/Makefile b/vendor/github.com/pulumi/pulumi/sdk/v3/python/Makefile index dec3734..2703d3b 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/Makefile +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/Makefile @@ -2,6 +2,7 @@ PROJECT_NAME := Pulumi Python SDK LANGHOST_PKG := github.com/pulumi/pulumi/sdk/v3/python/cmd/pulumi-language-python VERSION := $(shell cd ../../ && pulumictl get version) PYPI_VERSION := $(shell cd ../../ && pulumictl get version --language python) +PROJECT_ROOT := $(realpath ../..) PYENV := ./env PYENVSRC := $(PYENV)/src @@ -11,24 +12,40 @@ TESTPARALLELISM := 10 include ../../build/common.mk +# Motivation: running `make TEST_ALL_DEPS= test_all` permits running +# `test_all` without the dependencies. +TEST_ALL_DEPS = build + ensure:: - pipenv install --dev + $(PYTHON) -m venv venv + . venv/*/activate && python -m pip install wheel + . venv/*/activate && python -m pip install -r requirements.txt mkdir -p $(PYENVSRC) -build_package:: +build_package:: ensure rm -rf $(PYENVSRC) && cp -R ./lib/. $(PYENVSRC)/ sed -i.bak "s/\$${VERSION}/$(PYPI_VERSION)/g" $(PYENVSRC)/setup.py && rm $(PYENVSRC)/setup.py.bak cp ../../README.md $(PYENVSRC) - cd $(PYENVSRC) && pipenv run python setup.py build bdist_wheel --universal + . venv/*/activate && cd $(PYENVSRC) && \ + python setup.py build bdist_wheel --universal + . venv/*/activate && python -m pip install -e $(PYENVSRC) build_plugin:: go install -ldflags "-X github.com/pulumi/pulumi/sdk/v3/go/common/version.Version=${VERSION}" ${LANGHOST_PKG} build:: build_package build_plugin -lint:: - MYPYPATH=./stubs pipenv run mypy ./lib/pulumi --config-file=mypy.ini - pipenv run pylint ./lib/pulumi --rcfile=.pylintrc +lint:: ensure + . venv/*/activate && \ + python -m black ./lib/pulumi --check + . venv/*/activate && \ + MYPYPATH=./stubs python -m mypy ./lib/pulumi --config-file=mypy.ini + . 
venv/*/activate && \ + python -m pylint ./lib/pulumi --rcfile=.pylintrc + +format:: ensure + . venv/*/activate && \ + python -m black ./lib/pulumi install_package:: build_package cp ./cmd/pulumi-language-python-exec "$(PULUMI_BIN)" @@ -41,16 +58,16 @@ install_plugin:: build_plugin install:: install_package install_plugin -test_fast:: build - go test -count=1 -cover -parallel ${TESTPARALLELISM} ${PROJECT_PKGS} - pipenv run pip install ./env/src - # TODO the ignored test seems to fail in pytest but not unittest. Need to trackdown why - pipenv run pytest lib/test --ignore lib/test/langhost/resource_thens/test_resource_thens.py - pipenv run python -m unittest lib/test/langhost/resource_thens/test_resource_thens.py - # Using python -m also adds lib/test_with_mocks to sys.path which avoids package resolution issues. - pushd lib/test_with_mocks ; pipenv run python -m pytest ; popd +test_go:: $(TEST_ALL_DEPS) + $(GO_TEST) ${PROJECT_PKGS} + +test_fast:: $(TEST_ALL_DEPS) + . venv/*/activate && ./scripts/test_fast.sh + +test_auto:: $(TEST_ALL_DEPS) + . 
venv/*/activate && ./scripts/test_auto.sh -test_all:: test_fast +test_all:: test_fast test_auto test_go dist:: go install -ldflags "-X github.com/pulumi/pulumi/sdk/v3/go/common/version.Version=${VERSION}" ${LANGHOST_PKG} @@ -67,7 +84,7 @@ brew:: publish:: build_package twine upload \ - -u pulumi -p "${PYPI_PASSWORD}" \ + -u pulumi -p "${PYPI_PASSWORD}" \ "env/src/dist"/*.whl \ --skip-existing \ --verbose diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/Pipfile b/vendor/github.com/pulumi/pulumi/sdk/v3/python/Pipfile deleted file mode 100644 index baac917..0000000 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/Pipfile +++ /dev/null @@ -1,18 +0,0 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[packages] -# Keep this list in sync with setup.py -protobuf = ">=3.6.0" -grpcio = ">=1.33.2" -dill = ">=0.3.0" -six = ">=1.12.0" -semver = ">=2.8.1" -pyyaml = ">=5.3.1" - -[dev-packages] -pylint = ">=2.1" -mypy = ">=0.78" -pytest = "*" diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/python.go b/vendor/github.com/pulumi/pulumi/sdk/v3/python/python.go index 9da3f91..5dfce19 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/python.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/python.go @@ -31,9 +31,10 @@ const ( pythonShimCmdFormat = "pulumi-%s-shim.cmd" ) -// Command returns an *exec.Cmd for running `python`. If the `PULUMI_PYTHON_CMD` variable is set -// it will be looked for on `PATH`, otherwise, `python3` and `python` will be looked for. -func Command(arg ...string) (*exec.Cmd, error) { +// Find the correct path and command for Python. If the `PULUMI_PYTHON_CMD` +// variable is set it will be looked for on `PATH`, otherwise, `python3` and +// `python` will be looked for. 
+func CommandPath() (string /*pythonPath*/, string /*pythonCmd*/, error) { var err error var pythonCmds []string @@ -65,12 +66,22 @@ func Command(arg ...string) (*exec.Cmd, error) { pythonCmd, pythonPath, err = resolveWindowsExecutionAlias(pythonCmds) } if err != nil { - return nil, errors.Errorf( + return "", "", errors.Errorf( "Failed to locate any of %q on your PATH. Have you installed Python 3.6 or greater?", pythonCmds) } } + return pythonPath, pythonCmd, nil + +} +// Command returns an *exec.Cmd for running `python`. Uses `ComandPath` +// internally to find the correct executable. +func Command(arg ...string) (*exec.Cmd, error) { + pythonPath, pythonCmd, err := CommandPath() + if err != nil { + return nil, err + } if needsPythonShim(pythonPath) { shimCmd := fmt.Sprintf(pythonShimCmdFormat, pythonCmd) return exec.Command(shimCmd, arg...), nil diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/requirements.txt b/vendor/github.com/pulumi/pulumi/sdk/v3/python/requirements.txt new file mode 100644 index 0000000..6cd1d17 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/requirements.txt @@ -0,0 +1,21 @@ +# Packages needed by the library. +# Keep this list in sync with setup.py. +protobuf>=3.6.0 +grpcio>=1.33.2 +dill>=0.3.0 +six>=1.12.0 +semver>=2.8.1 +pyyaml>=5.3.1 + +# Dev packages only needed during development. +pylint==2.10.2 +mypy>=0.78 +pytest +pytest-timeout +types-six +types-pyyaml +types-protobuf +pytest-asyncio +coverage>=6.0.2 +wheel +black>=1.0.0 diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/shim_unix.go b/vendor/github.com/pulumi/pulumi/sdk/v3/python/shim_unix.go index 0208824..0bd871d 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/shim_unix.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/shim_unix.go @@ -1,4 +1,5 @@ -//+build !windows +//go:build !windows +// +build !windows // Copyright 2020, Pulumi Corporation. 
// diff --git a/vendor/github.com/rivo/uniseg/LICENSE.txt b/vendor/github.com/rivo/uniseg/LICENSE.txt new file mode 100644 index 0000000..5040f1e --- /dev/null +++ b/vendor/github.com/rivo/uniseg/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Oliver Kuederle + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md new file mode 100644 index 0000000..f8da293 --- /dev/null +++ b/vendor/github.com/rivo/uniseg/README.md @@ -0,0 +1,62 @@ +# Unicode Text Segmentation for Go + +[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/rivo/uniseg) +[![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) + +This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](http://unicode.org/reports/tr29/) (Unicode version 12.0.0). 
+ +At this point, only the determination of grapheme cluster boundaries is implemented. + +## Background + +In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples: + +|String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters| +|-|-|-|-| +|Käse|6 bytes: `4b 61 cc 88 73 65`|5 code points: `4b 61 308 73 65`|4 clusters: `[4b],[61 308],[73],[65]`| +|🏳️‍🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`| +|🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`| + +This package provides a tool to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit. + +## Installation + +```bash +go get github.com/rivo/uniseg +``` + +## Basic Example + +```go +package uniseg + +import ( + "fmt" + + "github.com/rivo/uniseg" +) + +func main() { + gr := uniseg.NewGraphemes("👍🏼!") + for gr.Next() { + fmt.Printf("%x ", gr.Runes()) + } + // Output: [1f44d 1f3fc] [21] +} +``` + +## Documentation + +Refer to https://godoc.org/github.com/rivo/uniseg for the package's documentation. + +## Dependencies + +This package does not depend on any packages outside the standard library. + +## Your Feedback + +Add your issue here on GitHub. Feel free to get in touch if you have any questions. + +## Version + +Version tags will be introduced once Golang modules are official. Consider this version 0.1. 
diff --git a/vendor/github.com/rivo/uniseg/doc.go b/vendor/github.com/rivo/uniseg/doc.go new file mode 100644 index 0000000..60c737d --- /dev/null +++ b/vendor/github.com/rivo/uniseg/doc.go @@ -0,0 +1,8 @@ +/* +Package uniseg implements Unicode Text Segmentation according to Unicode +Standard Annex #29 (http://unicode.org/reports/tr29/). + +At this point, only the determination of grapheme cluster boundaries is +implemented. +*/ +package uniseg diff --git a/vendor/github.com/rivo/uniseg/go.mod b/vendor/github.com/rivo/uniseg/go.mod new file mode 100644 index 0000000..a54280b --- /dev/null +++ b/vendor/github.com/rivo/uniseg/go.mod @@ -0,0 +1,3 @@ +module github.com/rivo/uniseg + +go 1.12 diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go new file mode 100644 index 0000000..207157f --- /dev/null +++ b/vendor/github.com/rivo/uniseg/grapheme.go @@ -0,0 +1,268 @@ +package uniseg + +import "unicode/utf8" + +// The states of the grapheme cluster parser. +const ( + grAny = iota + grCR + grControlLF + grL + grLVV + grLVTT + grPrepend + grExtendedPictographic + grExtendedPictographicZWJ + grRIOdd + grRIEven +) + +// The grapheme cluster parser's breaking instructions. +const ( + grNoBoundary = iota + grBoundary +) + +// The grapheme cluster parser's state transitions. Maps (state, property) to +// (new state, breaking instruction, rule number). The breaking instruction +// always refers to the boundary between the last and next code point. +// +// This map is queried as follows: +// +// 1. Find specific state + specific property. Stop if found. +// 2. Find specific state + any property. +// 3. Find any state + specific property. +// 4. If only (2) or (3) (but not both) was found, stop. +// 5. If both (2) and (3) were found, use state and breaking instruction from +// the transition with the lower rule number, prefer (3) if rule numbers +// are equal. Stop. +// 6. Assume grAny and grBoundary. 
+var grTransitions = map[[2]int][3]int{ + // GB5 + {grAny, prCR}: {grCR, grBoundary, 50}, + {grAny, prLF}: {grControlLF, grBoundary, 50}, + {grAny, prControl}: {grControlLF, grBoundary, 50}, + + // GB4 + {grCR, prAny}: {grAny, grBoundary, 40}, + {grControlLF, prAny}: {grAny, grBoundary, 40}, + + // GB3. + {grCR, prLF}: {grAny, grNoBoundary, 30}, + + // GB6. + {grAny, prL}: {grL, grBoundary, 9990}, + {grL, prL}: {grL, grNoBoundary, 60}, + {grL, prV}: {grLVV, grNoBoundary, 60}, + {grL, prLV}: {grLVV, grNoBoundary, 60}, + {grL, prLVT}: {grLVTT, grNoBoundary, 60}, + + // GB7. + {grAny, prLV}: {grLVV, grBoundary, 9990}, + {grAny, prV}: {grLVV, grBoundary, 9990}, + {grLVV, prV}: {grLVV, grNoBoundary, 70}, + {grLVV, prT}: {grLVTT, grNoBoundary, 70}, + + // GB8. + {grAny, prLVT}: {grLVTT, grBoundary, 9990}, + {grAny, prT}: {grLVTT, grBoundary, 9990}, + {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, + + // GB9. + {grAny, prExtend}: {grAny, grNoBoundary, 90}, + {grAny, prZWJ}: {grAny, grNoBoundary, 90}, + + // GB9a. + {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, + + // GB9b. + {grAny, prPreprend}: {grPrepend, grBoundary, 9990}, + {grPrepend, prAny}: {grAny, grNoBoundary, 92}, + + // GB11. + {grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, + {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, + {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, + {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, + + // GB12 / GB13. + {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, + {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, + {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, +} + +// Graphemes implements an iterator over Unicode extended grapheme clusters, +// specified in the Unicode Standard Annex #29. Grapheme clusters correspond to +// "user-perceived characters". 
These characters often consist of multiple +// code points (e.g. the "woman kissing woman" emoji consists of 8 code points: +// woman + ZWJ + heavy black heart (2 code points) + ZWJ + kiss mark + ZWJ + +// woman) and the rules described in Annex #29 must be applied to group those +// code points into clusters perceived by the user as one character. +type Graphemes struct { + // The code points over which this class iterates. + codePoints []rune + + // The (byte-based) indices of the code points into the original string plus + // len(original string). Thus, len(indices) = len(codePoints) + 1. + indices []int + + // The current grapheme cluster to be returned. These are indices into + // codePoints/indices. If start == end, we either haven't started iterating + // yet (0) or the iteration has already completed (1). + start, end int + + // The index of the next code point to be parsed. + pos int + + // The current state of the code point parser. + state int +} + +// NewGraphemes returns a new grapheme cluster iterator. +func NewGraphemes(s string) *Graphemes { + l := utf8.RuneCountInString(s) + codePoints := make([]rune, l) + indices := make([]int, l+1) + i := 0 + for pos, r := range s { + codePoints[i] = r + indices[i] = pos + i++ + } + indices[l] = len(s) + g := &Graphemes{ + codePoints: codePoints, + indices: indices, + } + g.Next() // Parse ahead. + return g +} + +// Next advances the iterator by one grapheme cluster and returns false if no +// clusters are left. This function must be called before the first cluster is +// accessed. +func (g *Graphemes) Next() bool { + g.start = g.end + + // The state transition gives us a boundary instruction BEFORE the next code + // point so we always need to stay ahead by one code point. + + // Parse the next code point. + for g.pos <= len(g.codePoints) { + // GB2. + if g.pos == len(g.codePoints) { + g.end = g.pos + g.pos++ + break + } + + // Determine the property of the next character. 
+ nextProperty := property(g.codePoints[g.pos]) + g.pos++ + + // Find the applicable transition. + var boundary bool + transition, ok := grTransitions[[2]int{g.state, nextProperty}] + if ok { + // We have a specific transition. We'll use it. + g.state = transition[0] + boundary = transition[1] == grBoundary + } else { + // No specific transition found. Try the less specific ones. + transAnyProp, okAnyProp := grTransitions[[2]int{g.state, prAny}] + transAnyState, okAnyState := grTransitions[[2]int{grAny, nextProperty}] + if okAnyProp && okAnyState { + // Both apply. We'll use a mix (see comments for grTransitions). + g.state = transAnyState[0] + boundary = transAnyState[1] == grBoundary + if transAnyProp[2] < transAnyState[2] { + g.state = transAnyProp[0] + boundary = transAnyProp[1] == grBoundary + } + } else if okAnyProp { + // We only have a specific state. + g.state = transAnyProp[0] + boundary = transAnyProp[1] == grBoundary + // This branch will probably never be reached because okAnyState will + // always be true given the current transition map. But we keep it here + // for future modifications to the transition map where this may not be + // true anymore. + } else if okAnyState { + // We only have a specific property. + g.state = transAnyState[0] + boundary = transAnyState[1] == grBoundary + } else { + // No known transition. GB999: Any x Any. + g.state = grAny + boundary = true + } + } + + // If we found a cluster boundary, let's stop here. The current cluster will + // be the one that just ended. + if g.pos-1 == 0 /* GB1 */ || boundary { + g.end = g.pos - 1 + break + } + } + + return g.start != g.end +} + +// Runes returns a slice of runes (code points) which corresponds to the current +// grapheme cluster. If the iterator is already past the end or Next() has not +// yet been called, nil is returned. 
+func (g *Graphemes) Runes() []rune { + if g.start == g.end { + return nil + } + return g.codePoints[g.start:g.end] +} + +// Str returns a substring of the original string which corresponds to the +// current grapheme cluster. If the iterator is already past the end or Next() +// has not yet been called, an empty string is returned. +func (g *Graphemes) Str() string { + if g.start == g.end { + return "" + } + return string(g.codePoints[g.start:g.end]) +} + +// Bytes returns a byte slice which corresponds to the current grapheme cluster. +// If the iterator is already past the end or Next() has not yet been called, +// nil is returned. +func (g *Graphemes) Bytes() []byte { + if g.start == g.end { + return nil + } + return []byte(string(g.codePoints[g.start:g.end])) +} + +// Positions returns the interval of the current grapheme cluster as byte +// positions into the original string. The first returned value "from" indexes +// the first byte and the second returned value "to" indexes the first byte that +// is not included anymore, i.e. str[from:to] is the current grapheme cluster of +// the original string "str". If Next() has not yet been called, both values are +// 0. If the iterator is already past the end, both values are 1. +func (g *Graphemes) Positions() (int, int) { + return g.indices[g.start], g.indices[g.end] +} + +// Reset puts the iterator into its initial state such that the next call to +// Next() sets it to the first grapheme cluster again. +func (g *Graphemes) Reset() { + g.start, g.end, g.pos, g.state = 0, 0, 0, grAny + g.Next() // Parse ahead again. +} + +// GraphemeClusterCount returns the number of user-perceived characters +// (grapheme clusters) for the given string. To calculate this number, it +// iterates through the string using the Graphemes iterator. 
+func GraphemeClusterCount(s string) (n int) { + g := NewGraphemes(s) + for g.Next() { + n++ + } + return +} diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go new file mode 100644 index 0000000..a75ab58 --- /dev/null +++ b/vendor/github.com/rivo/uniseg/properties.go @@ -0,0 +1,1658 @@ +package uniseg + +// The unicode properties. Only the ones needed in the context of this package +// are included. +const ( + prAny = iota + prPreprend + prCR + prLF + prControl + prExtend + prRegionalIndicator + prSpacingMark + prL + prV + prT + prLV + prLVT + prZWJ + prExtendedPictographic +) + +// Maps code point ranges to their properties. In the context of this package, +// any code point that is not contained may map to "prAny". The code point +// ranges in this slice are numerically sorted. +// +// These ranges were taken from +// http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt +// as well as +// https://unicode.org/Public/emoji/latest/emoji-data.txt +// ("Extended_Pictographic" only) on March 11, 2019. See +// https://www.unicode.org/license.html for the Unicode license agreement. +var codePoints = [][3]int{ + {0x0000, 0x0009, prControl}, // Cc [10] .. + {0x000A, 0x000A, prLF}, // Cc + {0x000B, 0x000C, prControl}, // Cc [2] .. + {0x000D, 0x000D, prCR}, // Cc + {0x000E, 0x001F, prControl}, // Cc [18] .. + {0x007F, 0x009F, prControl}, // Cc [33] .. 
+ {0x00A9, 0x00A9, prExtendedPictographic}, // 1.1 [1] (©️) copyright + {0x00AD, 0x00AD, prControl}, // Cf SOFT HYPHEN + {0x00AE, 0x00AE, prExtendedPictographic}, // 1.1 [1] (®️) registered + {0x0300, 0x036F, prExtend}, // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X + {0x0483, 0x0487, prExtend}, // Mn [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE + {0x0488, 0x0489, prExtend}, // Me [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN + {0x0591, 0x05BD, prExtend}, // Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG + {0x05BF, 0x05BF, prExtend}, // Mn HEBREW POINT RAFE + {0x05C1, 0x05C2, prExtend}, // Mn [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT + {0x05C4, 0x05C5, prExtend}, // Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT + {0x05C7, 0x05C7, prExtend}, // Mn HEBREW POINT QAMATS QATAN + {0x0600, 0x0605, prPreprend}, // Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE + {0x0610, 0x061A, prExtend}, // Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA + {0x061C, 0x061C, prControl}, // Cf ARABIC LETTER MARK + {0x064B, 0x065F, prExtend}, // Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW + {0x0670, 0x0670, prExtend}, // Mn ARABIC LETTER SUPERSCRIPT ALEF + {0x06D6, 0x06DC, prExtend}, // Mn [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN + {0x06DD, 0x06DD, prPreprend}, // Cf ARABIC END OF AYAH + {0x06DF, 0x06E4, prExtend}, // Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA + {0x06E7, 0x06E8, prExtend}, // Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON + {0x06EA, 0x06ED, prExtend}, // Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM + {0x070F, 0x070F, prPreprend}, // Cf SYRIAC ABBREVIATION MARK + {0x0711, 0x0711, prExtend}, // Mn SYRIAC LETTER SUPERSCRIPT ALAPH + {0x0730, 0x074A, prExtend}, // Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH + {0x07A6, 0x07B0, prExtend}, // Mn [11] THAANA ABAFILI..THAANA SUKUN + 
{0x07EB, 0x07F3, prExtend}, // Mn [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE + {0x07FD, 0x07FD, prExtend}, // Mn NKO DANTAYALAN + {0x0816, 0x0819, prExtend}, // Mn [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH + {0x081B, 0x0823, prExtend}, // Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A + {0x0825, 0x0827, prExtend}, // Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U + {0x0829, 0x082D, prExtend}, // Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA + {0x0859, 0x085B, prExtend}, // Mn [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK + {0x08D3, 0x08E1, prExtend}, // Mn [15] ARABIC SMALL LOW WAW..ARABIC SMALL HIGH SIGN SAFHA + {0x08E2, 0x08E2, prPreprend}, // Cf ARABIC DISPUTED END OF AYAH + {0x08E3, 0x0902, prExtend}, // Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAGARI SIGN ANUSVARA + {0x0903, 0x0903, prSpacingMark}, // Mc DEVANAGARI SIGN VISARGA + {0x093A, 0x093A, prExtend}, // Mn DEVANAGARI VOWEL SIGN OE + {0x093B, 0x093B, prSpacingMark}, // Mc DEVANAGARI VOWEL SIGN OOE + {0x093C, 0x093C, prExtend}, // Mn DEVANAGARI SIGN NUKTA + {0x093E, 0x0940, prSpacingMark}, // Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II + {0x0941, 0x0948, prExtend}, // Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI + {0x0949, 0x094C, prSpacingMark}, // Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU + {0x094D, 0x094D, prExtend}, // Mn DEVANAGARI SIGN VIRAMA + {0x094E, 0x094F, prSpacingMark}, // Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW + {0x0951, 0x0957, prExtend}, // Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE + {0x0962, 0x0963, prExtend}, // Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL + {0x0981, 0x0981, prExtend}, // Mn BENGALI SIGN CANDRABINDU + {0x0982, 0x0983, prSpacingMark}, // Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA + {0x09BC, 0x09BC, prExtend}, // Mn BENGALI SIGN NUKTA + {0x09BE, 0x09BE, prExtend}, // 
Mc BENGALI VOWEL SIGN AA + {0x09BF, 0x09C0, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN I..BENGALI VOWEL SIGN II + {0x09C1, 0x09C4, prExtend}, // Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR + {0x09C7, 0x09C8, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI + {0x09CB, 0x09CC, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU + {0x09CD, 0x09CD, prExtend}, // Mn BENGALI SIGN VIRAMA + {0x09D7, 0x09D7, prExtend}, // Mc BENGALI AU LENGTH MARK + {0x09E2, 0x09E3, prExtend}, // Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL + {0x09FE, 0x09FE, prExtend}, // Mn BENGALI SANDHI MARK + {0x0A01, 0x0A02, prExtend}, // Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI + {0x0A03, 0x0A03, prSpacingMark}, // Mc GURMUKHI SIGN VISARGA + {0x0A3C, 0x0A3C, prExtend}, // Mn GURMUKHI SIGN NUKTA + {0x0A3E, 0x0A40, prSpacingMark}, // Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II + {0x0A41, 0x0A42, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU + {0x0A47, 0x0A48, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI + {0x0A4B, 0x0A4D, prExtend}, // Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA + {0x0A51, 0x0A51, prExtend}, // Mn GURMUKHI SIGN UDAAT + {0x0A70, 0x0A71, prExtend}, // Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK + {0x0A75, 0x0A75, prExtend}, // Mn GURMUKHI SIGN YAKASH + {0x0A81, 0x0A82, prExtend}, // Mn [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA + {0x0A83, 0x0A83, prSpacingMark}, // Mc GUJARATI SIGN VISARGA + {0x0ABC, 0x0ABC, prExtend}, // Mn GUJARATI SIGN NUKTA + {0x0ABE, 0x0AC0, prSpacingMark}, // Mc [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II + {0x0AC1, 0x0AC5, prExtend}, // Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E + {0x0AC7, 0x0AC8, prExtend}, // Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI + {0x0AC9, 0x0AC9, prSpacingMark}, // Mc GUJARATI VOWEL SIGN CANDRA O + {0x0ACB, 0x0ACC, prSpacingMark}, // Mc [2] GUJARATI 
VOWEL SIGN O..GUJARATI VOWEL SIGN AU + {0x0ACD, 0x0ACD, prExtend}, // Mn GUJARATI SIGN VIRAMA + {0x0AE2, 0x0AE3, prExtend}, // Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL + {0x0AFA, 0x0AFF, prExtend}, // Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE + {0x0B01, 0x0B01, prExtend}, // Mn ORIYA SIGN CANDRABINDU + {0x0B02, 0x0B03, prSpacingMark}, // Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA + {0x0B3C, 0x0B3C, prExtend}, // Mn ORIYA SIGN NUKTA + {0x0B3E, 0x0B3E, prExtend}, // Mc ORIYA VOWEL SIGN AA + {0x0B3F, 0x0B3F, prExtend}, // Mn ORIYA VOWEL SIGN I + {0x0B40, 0x0B40, prSpacingMark}, // Mc ORIYA VOWEL SIGN II + {0x0B41, 0x0B44, prExtend}, // Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR + {0x0B47, 0x0B48, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI + {0x0B4B, 0x0B4C, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU + {0x0B4D, 0x0B4D, prExtend}, // Mn ORIYA SIGN VIRAMA + {0x0B56, 0x0B56, prExtend}, // Mn ORIYA AI LENGTH MARK + {0x0B57, 0x0B57, prExtend}, // Mc ORIYA AU LENGTH MARK + {0x0B62, 0x0B63, prExtend}, // Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL + {0x0B82, 0x0B82, prExtend}, // Mn TAMIL SIGN ANUSVARA + {0x0BBE, 0x0BBE, prExtend}, // Mc TAMIL VOWEL SIGN AA + {0x0BBF, 0x0BBF, prSpacingMark}, // Mc TAMIL VOWEL SIGN I + {0x0BC0, 0x0BC0, prExtend}, // Mn TAMIL VOWEL SIGN II + {0x0BC1, 0x0BC2, prSpacingMark}, // Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU + {0x0BC6, 0x0BC8, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI + {0x0BCA, 0x0BCC, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU + {0x0BCD, 0x0BCD, prExtend}, // Mn TAMIL SIGN VIRAMA + {0x0BD7, 0x0BD7, prExtend}, // Mc TAMIL AU LENGTH MARK + {0x0C00, 0x0C00, prExtend}, // Mn TELUGU SIGN COMBINING CANDRABINDU ABOVE + {0x0C01, 0x0C03, prSpacingMark}, // Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA + {0x0C04, 0x0C04, prExtend}, // Mn TELUGU SIGN 
COMBINING ANUSVARA ABOVE + {0x0C3E, 0x0C40, prExtend}, // Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II + {0x0C41, 0x0C44, prSpacingMark}, // Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR + {0x0C46, 0x0C48, prExtend}, // Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI + {0x0C4A, 0x0C4D, prExtend}, // Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA + {0x0C55, 0x0C56, prExtend}, // Mn [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK + {0x0C62, 0x0C63, prExtend}, // Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL + {0x0C81, 0x0C81, prExtend}, // Mn KANNADA SIGN CANDRABINDU + {0x0C82, 0x0C83, prSpacingMark}, // Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA + {0x0CBC, 0x0CBC, prExtend}, // Mn KANNADA SIGN NUKTA + {0x0CBE, 0x0CBE, prSpacingMark}, // Mc KANNADA VOWEL SIGN AA + {0x0CBF, 0x0CBF, prExtend}, // Mn KANNADA VOWEL SIGN I + {0x0CC0, 0x0CC1, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN U + {0x0CC2, 0x0CC2, prExtend}, // Mc KANNADA VOWEL SIGN UU + {0x0CC3, 0x0CC4, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN VOCALIC R..KANNADA VOWEL SIGN VOCALIC RR + {0x0CC6, 0x0CC6, prExtend}, // Mn KANNADA VOWEL SIGN E + {0x0CC7, 0x0CC8, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI + {0x0CCA, 0x0CCB, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO + {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA + {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK + {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL + {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU + {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA + {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA + {0x0D3E, 0x0D3E, prExtend}, // Mc MALAYALAM VOWEL SIGN AA + 
{0x0D3F, 0x0D40, prSpacingMark}, // Mc [2] MALAYALAM VOWEL SIGN I..MALAYALAM VOWEL SIGN II + {0x0D41, 0x0D44, prExtend}, // Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR + {0x0D46, 0x0D48, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI + {0x0D4A, 0x0D4C, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU + {0x0D4D, 0x0D4D, prExtend}, // Mn MALAYALAM SIGN VIRAMA + {0x0D4E, 0x0D4E, prPreprend}, // Lo MALAYALAM LETTER DOT REPH + {0x0D57, 0x0D57, prExtend}, // Mc MALAYALAM AU LENGTH MARK + {0x0D62, 0x0D63, prExtend}, // Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL + {0x0D82, 0x0D83, prSpacingMark}, // Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA + {0x0DCA, 0x0DCA, prExtend}, // Mn SINHALA SIGN AL-LAKUNA + {0x0DCF, 0x0DCF, prExtend}, // Mc SINHALA VOWEL SIGN AELA-PILLA + {0x0DD0, 0x0DD1, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN KETTI AEDA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA + {0x0DD2, 0x0DD4, prExtend}, // Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA + {0x0DD6, 0x0DD6, prExtend}, // Mn SINHALA VOWEL SIGN DIGA PAA-PILLA + {0x0DD8, 0x0DDE, prSpacingMark}, // Mc [7] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA + {0x0DDF, 0x0DDF, prExtend}, // Mc SINHALA VOWEL SIGN GAYANUKITTA + {0x0DF2, 0x0DF3, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA + {0x0E31, 0x0E31, prExtend}, // Mn THAI CHARACTER MAI HAN-AKAT + {0x0E33, 0x0E33, prSpacingMark}, // Lo THAI CHARACTER SARA AM + {0x0E34, 0x0E3A, prExtend}, // Mn [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU + {0x0E47, 0x0E4E, prExtend}, // Mn [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN + {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN + {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM + {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO + {0x0EC8, 
0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS + {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA + {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS + {0x0F39, 0x0F39, prExtend}, // Mn TIBETAN MARK TSA -PHRU + {0x0F3E, 0x0F3F, prSpacingMark}, // Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES + {0x0F71, 0x0F7E, prExtend}, // Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO + {0x0F7F, 0x0F7F, prSpacingMark}, // Mc TIBETAN SIGN RNAM BCAD + {0x0F80, 0x0F84, prExtend}, // Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA + {0x0F86, 0x0F87, prExtend}, // Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN SIGN YANG RTAGS + {0x0F8D, 0x0F97, prExtend}, // Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA + {0x0F99, 0x0FBC, prExtend}, // Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA + {0x0FC6, 0x0FC6, prExtend}, // Mn TIBETAN SYMBOL PADMA GDAN + {0x102D, 0x1030, prExtend}, // Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU + {0x1031, 0x1031, prSpacingMark}, // Mc MYANMAR VOWEL SIGN E + {0x1032, 0x1037, prExtend}, // Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW + {0x1039, 0x103A, prExtend}, // Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT + {0x103B, 0x103C, prSpacingMark}, // Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA + {0x103D, 0x103E, prExtend}, // Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA + {0x1056, 0x1057, prSpacingMark}, // Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR + {0x1058, 0x1059, prExtend}, // Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL + {0x105E, 0x1060, prExtend}, // Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA + {0x1071, 0x1074, prExtend}, // Mn [4] MYANMAR VOWEL SIGN GEBA KAREN 
I..MYANMAR VOWEL SIGN KAYAH EE + {0x1082, 0x1082, prExtend}, // Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA + {0x1084, 0x1084, prSpacingMark}, // Mc MYANMAR VOWEL SIGN SHAN E + {0x1085, 0x1086, prExtend}, // Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y + {0x108D, 0x108D, prExtend}, // Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE + {0x109D, 0x109D, prExtend}, // Mn MYANMAR VOWEL SIGN AITON AI + {0x1100, 0x115F, prL}, // Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER + {0x1160, 0x11A7, prV}, // Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE + {0x11A8, 0x11FF, prT}, // Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN + {0x135D, 0x135F, prExtend}, // Mn [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK + {0x1712, 0x1714, prExtend}, // Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA + {0x1732, 0x1734, prExtend}, // Mn [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN PAMUDPOD + {0x1752, 0x1753, prExtend}, // Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U + {0x1772, 0x1773, prExtend}, // Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U + {0x17B4, 0x17B5, prExtend}, // Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA + {0x17B6, 0x17B6, prSpacingMark}, // Mc KHMER VOWEL SIGN AA + {0x17B7, 0x17BD, prExtend}, // Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA + {0x17BE, 0x17C5, prSpacingMark}, // Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU + {0x17C6, 0x17C6, prExtend}, // Mn KHMER SIGN NIKAHIT + {0x17C7, 0x17C8, prSpacingMark}, // Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU + {0x17C9, 0x17D3, prExtend}, // Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT + {0x17DD, 0x17DD, prExtend}, // Mn KHMER SIGN ATTHACAN + {0x180B, 0x180D, prExtend}, // Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE + {0x180E, 0x180E, prControl}, // Cf MONGOLIAN VOWEL SEPARATOR + {0x1885, 0x1886, prExtend}, // Mn [2] MONGOLIAN LETTER ALI GALI 
BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA + {0x18A9, 0x18A9, prExtend}, // Mn MONGOLIAN LETTER ALI GALI DAGALGA + {0x1920, 0x1922, prExtend}, // Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U + {0x1923, 0x1926, prSpacingMark}, // Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU + {0x1927, 0x1928, prExtend}, // Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN O + {0x1929, 0x192B, prSpacingMark}, // Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA + {0x1930, 0x1931, prSpacingMark}, // Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA + {0x1932, 0x1932, prExtend}, // Mn LIMBU SMALL LETTER ANUSVARA + {0x1933, 0x1938, prSpacingMark}, // Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA + {0x1939, 0x193B, prExtend}, // Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I + {0x1A17, 0x1A18, prExtend}, // Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U + {0x1A19, 0x1A1A, prSpacingMark}, // Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O + {0x1A1B, 0x1A1B, prExtend}, // Mn BUGINESE VOWEL SIGN AE + {0x1A55, 0x1A55, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN MEDIAL RA + {0x1A56, 0x1A56, prExtend}, // Mn TAI THAM CONSONANT SIGN MEDIAL LA + {0x1A57, 0x1A57, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN LA TANG LAI + {0x1A58, 0x1A5E, prExtend}, // Mn [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA + {0x1A60, 0x1A60, prExtend}, // Mn TAI THAM SIGN SAKOT + {0x1A62, 0x1A62, prExtend}, // Mn TAI THAM VOWEL SIGN MAI SAT + {0x1A65, 0x1A6C, prExtend}, // Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW + {0x1A6D, 0x1A72, prSpacingMark}, // Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI + {0x1A73, 0x1A7C, prExtend}, // Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN + {0x1A7F, 0x1A7F, prExtend}, // Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT + {0x1AB0, 0x1ABD, prExtend}, // Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW + {0x1ABE, 0x1ABE, prExtend}, // Me COMBINING PARENTHESES OVERLAY + {0x1B00, 
0x1B03, prExtend}, // Mn [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG + {0x1B04, 0x1B04, prSpacingMark}, // Mc BALINESE SIGN BISAH + {0x1B34, 0x1B34, prExtend}, // Mn BALINESE SIGN REREKAN + {0x1B35, 0x1B35, prExtend}, // Mc BALINESE VOWEL SIGN TEDUNG + {0x1B36, 0x1B3A, prExtend}, // Mn [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA + {0x1B3B, 0x1B3B, prSpacingMark}, // Mc BALINESE VOWEL SIGN RA REPA TEDUNG + {0x1B3C, 0x1B3C, prExtend}, // Mn BALINESE VOWEL SIGN LA LENGA + {0x1B3D, 0x1B41, prSpacingMark}, // Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG + {0x1B42, 0x1B42, prExtend}, // Mn BALINESE VOWEL SIGN PEPET + {0x1B43, 0x1B44, prSpacingMark}, // Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG + {0x1B6B, 0x1B73, prExtend}, // Mn [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG + {0x1B80, 0x1B81, prExtend}, // Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR + {0x1B82, 0x1B82, prSpacingMark}, // Mc SUNDANESE SIGN PANGWISAD + {0x1BA1, 0x1BA1, prSpacingMark}, // Mc SUNDANESE CONSONANT SIGN PAMINGKAL + {0x1BA2, 0x1BA5, prExtend}, // Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU + {0x1BA6, 0x1BA7, prSpacingMark}, // Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG + {0x1BA8, 0x1BA9, prExtend}, // Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG + {0x1BAA, 0x1BAA, prSpacingMark}, // Mc SUNDANESE SIGN PAMAAEH + {0x1BAB, 0x1BAD, prExtend}, // Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA + {0x1BE6, 0x1BE6, prExtend}, // Mn BATAK SIGN TOMPI + {0x1BE7, 0x1BE7, prSpacingMark}, // Mc BATAK VOWEL SIGN E + {0x1BE8, 0x1BE9, prExtend}, // Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE + {0x1BEA, 0x1BEC, prSpacingMark}, // Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O + {0x1BED, 0x1BED, prExtend}, // Mn BATAK VOWEL SIGN KARO O + {0x1BEE, 0x1BEE, prSpacingMark}, // Mc BATAK VOWEL 
SIGN U + {0x1BEF, 0x1BF1, prExtend}, // Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H + {0x1BF2, 0x1BF3, prSpacingMark}, // Mc [2] BATAK PANGOLAT..BATAK PANONGONAN + {0x1C24, 0x1C2B, prSpacingMark}, // Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU + {0x1C2C, 0x1C33, prExtend}, // Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T + {0x1C34, 0x1C35, prSpacingMark}, // Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG + {0x1C36, 0x1C37, prExtend}, // Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA + {0x1CD0, 0x1CD2, prExtend}, // Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA + {0x1CD4, 0x1CE0, prExtend}, // Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA + {0x1CE1, 0x1CE1, prSpacingMark}, // Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA + {0x1CE2, 0x1CE8, prExtend}, // Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL + {0x1CED, 0x1CED, prExtend}, // Mn VEDIC SIGN TIRYAK + {0x1CF4, 0x1CF4, prExtend}, // Mn VEDIC TONE CANDRA ABOVE + {0x1CF7, 0x1CF7, prSpacingMark}, // Mc VEDIC SIGN ATIKRAMA + {0x1CF8, 0x1CF9, prExtend}, // Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE + {0x1DC0, 0x1DF9, prExtend}, // Mn [58] COMBINING DOTTED GRAVE ACCENT..COMBINING WIDE INVERTED BRIDGE BELOW + {0x1DFB, 0x1DFF, prExtend}, // Mn [5] COMBINING DELETION MARK..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW + {0x200B, 0x200B, prControl}, // Cf ZERO WIDTH SPACE + {0x200C, 0x200C, prExtend}, // Cf ZERO WIDTH NON-JOINER + {0x200D, 0x200D, prZWJ}, // Cf ZERO WIDTH JOINER + {0x200E, 0x200F, prControl}, // Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT MARK + {0x2028, 0x2028, prControl}, // Zl LINE SEPARATOR + {0x2029, 0x2029, prControl}, // Zp PARAGRAPH SEPARATOR + {0x202A, 0x202E, prControl}, // Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE + {0x203C, 0x203C, prExtendedPictographic}, // 1.1 [1] (‼️) double exclamation mark + {0x2049, 0x2049, 
prExtendedPictographic}, // 3.0 [1] (⁉️) exclamation question mark + {0x2060, 0x2064, prControl}, // Cf [5] WORD JOINER..INVISIBLE PLUS + {0x2065, 0x2065, prControl}, // Cn + {0x2066, 0x206F, prControl}, // Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES + {0x20D0, 0x20DC, prExtend}, // Mn [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE + {0x20DD, 0x20E0, prExtend}, // Me [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH + {0x20E1, 0x20E1, prExtend}, // Mn COMBINING LEFT RIGHT ARROW ABOVE + {0x20E2, 0x20E4, prExtend}, // Me [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE + {0x20E5, 0x20F0, prExtend}, // Mn [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE + {0x2122, 0x2122, prExtendedPictographic}, // 1.1 [1] (™️) trade mark + {0x2139, 0x2139, prExtendedPictographic}, // 3.0 [1] (ℹ️) information + {0x2194, 0x2199, prExtendedPictographic}, // 1.1 [6] (↔️..↙️) left-right arrow..down-left arrow + {0x21A9, 0x21AA, prExtendedPictographic}, // 1.1 [2] (↩️..↪️) right arrow curving left..left arrow curving right + {0x231A, 0x231B, prExtendedPictographic}, // 1.1 [2] (⌚..⌛) watch..hourglass done + {0x2328, 0x2328, prExtendedPictographic}, // 1.1 [1] (⌨️) keyboard + {0x2388, 0x2388, prExtendedPictographic}, // 3.0 [1] (⎈) HELM SYMBOL + {0x23CF, 0x23CF, prExtendedPictographic}, // 4.0 [1] (⏏️) eject button + {0x23E9, 0x23F3, prExtendedPictographic}, // 6.0 [11] (⏩..⏳) fast-forward button..hourglass not done + {0x23F8, 0x23FA, prExtendedPictographic}, // 7.0 [3] (⏸️..⏺️) pause button..record button + {0x24C2, 0x24C2, prExtendedPictographic}, // 1.1 [1] (Ⓜ️) circled M + {0x25AA, 0x25AB, prExtendedPictographic}, // 1.1 [2] (▪️..▫️) black small square..white small square + {0x25B6, 0x25B6, prExtendedPictographic}, // 1.1 [1] (▶️) play button + {0x25C0, 0x25C0, prExtendedPictographic}, // 1.1 [1] (◀️) reverse button + {0x25FB, 0x25FE, prExtendedPictographic}, // 3.2 [4] (◻️..◾) white medium 
square..black medium-small square + {0x2600, 0x2605, prExtendedPictographic}, // 1.1 [6] (☀️..★) sun..BLACK STAR + {0x2607, 0x2612, prExtendedPictographic}, // 1.1 [12] (☇..☒) LIGHTNING..BALLOT BOX WITH X + {0x2614, 0x2615, prExtendedPictographic}, // 4.0 [2] (☔..☕) umbrella with rain drops..hot beverage + {0x2616, 0x2617, prExtendedPictographic}, // 3.2 [2] (☖..☗) WHITE SHOGI PIECE..BLACK SHOGI PIECE + {0x2618, 0x2618, prExtendedPictographic}, // 4.1 [1] (☘️) shamrock + {0x2619, 0x2619, prExtendedPictographic}, // 3.0 [1] (☙) REVERSED ROTATED FLORAL HEART BULLET + {0x261A, 0x266F, prExtendedPictographic}, // 1.1 [86] (☚..♯) BLACK LEFT POINTING INDEX..MUSIC SHARP SIGN + {0x2670, 0x2671, prExtendedPictographic}, // 3.0 [2] (♰..♱) WEST SYRIAC CROSS..EAST SYRIAC CROSS + {0x2672, 0x267D, prExtendedPictographic}, // 3.2 [12] (♲..♽) UNIVERSAL RECYCLING SYMBOL..PARTIALLY-RECYCLED PAPER SYMBOL + {0x267E, 0x267F, prExtendedPictographic}, // 4.1 [2] (♾️..♿) infinity..wheelchair symbol + {0x2680, 0x2685, prExtendedPictographic}, // 3.2 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6 + {0x2690, 0x2691, prExtendedPictographic}, // 4.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG + {0x2692, 0x269C, prExtendedPictographic}, // 4.1 [11] (⚒️..⚜️) hammer and pick..fleur-de-lis + {0x269D, 0x269D, prExtendedPictographic}, // 5.1 [1] (⚝) OUTLINED WHITE STAR + {0x269E, 0x269F, prExtendedPictographic}, // 5.2 [2] (⚞..⚟) THREE LINES CONVERGING RIGHT..THREE LINES CONVERGING LEFT + {0x26A0, 0x26A1, prExtendedPictographic}, // 4.0 [2] (⚠️..⚡) warning..high voltage + {0x26A2, 0x26B1, prExtendedPictographic}, // 4.1 [16] (⚢..⚱️) DOUBLED FEMALE SIGN..funeral urn + {0x26B2, 0x26B2, prExtendedPictographic}, // 5.0 [1] (⚲) NEUTER + {0x26B3, 0x26BC, prExtendedPictographic}, // 5.1 [10] (⚳..⚼) CERES..SESQUIQUADRATE + {0x26BD, 0x26BF, prExtendedPictographic}, // 5.2 [3] (⚽..⚿) soccer ball..SQUARED KEY + {0x26C0, 0x26C3, prExtendedPictographic}, // 5.1 [4] (⛀..⛃) WHITE DRAUGHTS MAN..BLACK DRAUGHTS KING + {0x26C4, 0x26CD, 
prExtendedPictographic}, // 5.2 [10] (⛄..⛍) snowman without snow..DISABLED CAR + {0x26CE, 0x26CE, prExtendedPictographic}, // 6.0 [1] (⛎) Ophiuchus + {0x26CF, 0x26E1, prExtendedPictographic}, // 5.2 [19] (⛏️..⛡) pick..RESTRICTED LEFT ENTRY-2 + {0x26E2, 0x26E2, prExtendedPictographic}, // 6.0 [1] (⛢) ASTRONOMICAL SYMBOL FOR URANUS + {0x26E3, 0x26E3, prExtendedPictographic}, // 5.2 [1] (⛣) HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE + {0x26E4, 0x26E7, prExtendedPictographic}, // 6.0 [4] (⛤..⛧) PENTAGRAM..INVERTED PENTAGRAM + {0x26E8, 0x26FF, prExtendedPictographic}, // 5.2 [24] (⛨..⛿) BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE + {0x2700, 0x2700, prExtendedPictographic}, // 7.0 [1] (✀) BLACK SAFETY SCISSORS + {0x2701, 0x2704, prExtendedPictographic}, // 1.1 [4] (✁..✄) UPPER BLADE SCISSORS..WHITE SCISSORS + {0x2705, 0x2705, prExtendedPictographic}, // 6.0 [1] (✅) check mark button + {0x2708, 0x2709, prExtendedPictographic}, // 1.1 [2] (✈️..✉️) airplane..envelope + {0x270A, 0x270B, prExtendedPictographic}, // 6.0 [2] (✊..✋) raised fist..raised hand + {0x270C, 0x2712, prExtendedPictographic}, // 1.1 [7] (✌️..✒️) victory hand..black nib + {0x2714, 0x2714, prExtendedPictographic}, // 1.1 [1] (✔️) check mark + {0x2716, 0x2716, prExtendedPictographic}, // 1.1 [1] (✖️) multiplication sign + {0x271D, 0x271D, prExtendedPictographic}, // 1.1 [1] (✝️) latin cross + {0x2721, 0x2721, prExtendedPictographic}, // 1.1 [1] (✡️) star of David + {0x2728, 0x2728, prExtendedPictographic}, // 6.0 [1] (✨) sparkles + {0x2733, 0x2734, prExtendedPictographic}, // 1.1 [2] (✳️..✴️) eight-spoked asterisk..eight-pointed star + {0x2744, 0x2744, prExtendedPictographic}, // 1.1 [1] (❄️) snowflake + {0x2747, 0x2747, prExtendedPictographic}, // 1.1 [1] (❇️) sparkle + {0x274C, 0x274C, prExtendedPictographic}, // 6.0 [1] (❌) cross mark + {0x274E, 0x274E, prExtendedPictographic}, // 6.0 [1] (❎) cross mark button + {0x2753, 0x2755, prExtendedPictographic}, // 6.0 [3] (❓..❕) 
question mark..white exclamation mark + {0x2757, 0x2757, prExtendedPictographic}, // 5.2 [1] (❗) exclamation mark + {0x2763, 0x2767, prExtendedPictographic}, // 1.1 [5] (❣️..❧) heart exclamation..ROTATED FLORAL HEART BULLET + {0x2795, 0x2797, prExtendedPictographic}, // 6.0 [3] (➕..➗) plus sign..division sign + {0x27A1, 0x27A1, prExtendedPictographic}, // 1.1 [1] (➡️) right arrow + {0x27B0, 0x27B0, prExtendedPictographic}, // 6.0 [1] (➰) curly loop + {0x27BF, 0x27BF, prExtendedPictographic}, // 6.0 [1] (➿) double curly loop + {0x2934, 0x2935, prExtendedPictographic}, // 3.2 [2] (⤴️..⤵️) right arrow curving up..right arrow curving down + {0x2B05, 0x2B07, prExtendedPictographic}, // 4.0 [3] (⬅️..⬇️) left arrow..down arrow + {0x2B1B, 0x2B1C, prExtendedPictographic}, // 5.1 [2] (⬛..⬜) black large square..white large square + {0x2B50, 0x2B50, prExtendedPictographic}, // 5.1 [1] (⭐) star + {0x2B55, 0x2B55, prExtendedPictographic}, // 5.2 [1] (⭕) hollow red circle + {0x2CEF, 0x2CF1, prExtend}, // Mn [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS + {0x2D7F, 0x2D7F, prExtend}, // Mn TIFINAGH CONSONANT JOINER + {0x2DE0, 0x2DFF, prExtend}, // Mn [32] COMBINING CYRILLIC LETTER BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS + {0x302A, 0x302D, prExtend}, // Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK + {0x302E, 0x302F, prExtend}, // Mc [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK + {0x3030, 0x3030, prExtendedPictographic}, // 1.1 [1] (〰️) wavy dash + {0x303D, 0x303D, prExtendedPictographic}, // 3.2 [1] (〽️) part alternation mark + {0x3099, 0x309A, prExtend}, // Mn [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK + {0x3297, 0x3297, prExtendedPictographic}, // 1.1 [1] (㊗️) Japanese “congratulations” button + {0x3299, 0x3299, prExtendedPictographic}, // 1.1 [1] (㊙️) Japanese “secret” button + {0xA66F, 0xA66F, prExtend}, // Mn COMBINING CYRILLIC VZMET + {0xA670, 0xA672, 
prExtend}, // Me [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN + {0xA674, 0xA67D, prExtend}, // Mn [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK + {0xA69E, 0xA69F, prExtend}, // Mn [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E + {0xA6F0, 0xA6F1, prExtend}, // Mn [2] BAMUM COMBINING MARK KOQNDON..BAMUM COMBINING MARK TUKWENTIS + {0xA802, 0xA802, prExtend}, // Mn SYLOTI NAGRI SIGN DVISVARA + {0xA806, 0xA806, prExtend}, // Mn SYLOTI NAGRI SIGN HASANTA + {0xA80B, 0xA80B, prExtend}, // Mn SYLOTI NAGRI SIGN ANUSVARA + {0xA823, 0xA824, prSpacingMark}, // Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I + {0xA825, 0xA826, prExtend}, // Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E + {0xA827, 0xA827, prSpacingMark}, // Mc SYLOTI NAGRI VOWEL SIGN OO + {0xA880, 0xA881, prSpacingMark}, // Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA + {0xA8B4, 0xA8C3, prSpacingMark}, // Mc [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU + {0xA8C4, 0xA8C5, prExtend}, // Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU + {0xA8E0, 0xA8F1, prExtend}, // Mn [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA + {0xA8FF, 0xA8FF, prExtend}, // Mn DEVANAGARI VOWEL SIGN AY + {0xA926, 0xA92D, prExtend}, // Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU + {0xA947, 0xA951, prExtend}, // Mn [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R + {0xA952, 0xA953, prSpacingMark}, // Mc [2] REJANG CONSONANT SIGN H..REJANG VIRAMA + {0xA960, 0xA97C, prL}, // Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH + {0xA980, 0xA982, prExtend}, // Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR + {0xA983, 0xA983, prSpacingMark}, // Mc JAVANESE SIGN WIGNYAN + {0xA9B3, 0xA9B3, prExtend}, // Mn JAVANESE SIGN CECAK TELU + {0xA9B4, 0xA9B5, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG + {0xA9B6, 
0xA9B9, prExtend}, // Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT + {0xA9BA, 0xA9BB, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE + {0xA9BC, 0xA9BD, prExtend}, // Mn [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET + {0xA9BE, 0xA9C0, prSpacingMark}, // Mc [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON + {0xA9E5, 0xA9E5, prExtend}, // Mn MYANMAR SIGN SHAN SAW + {0xAA29, 0xAA2E, prExtend}, // Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE + {0xAA2F, 0xAA30, prSpacingMark}, // Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI + {0xAA31, 0xAA32, prExtend}, // Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE + {0xAA33, 0xAA34, prSpacingMark}, // Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA + {0xAA35, 0xAA36, prExtend}, // Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSONANT SIGN WA + {0xAA43, 0xAA43, prExtend}, // Mn CHAM CONSONANT SIGN FINAL NG + {0xAA4C, 0xAA4C, prExtend}, // Mn CHAM CONSONANT SIGN FINAL M + {0xAA4D, 0xAA4D, prSpacingMark}, // Mc CHAM CONSONANT SIGN FINAL H + {0xAA7C, 0xAA7C, prExtend}, // Mn MYANMAR SIGN TAI LAING TONE-2 + {0xAAB0, 0xAAB0, prExtend}, // Mn TAI VIET MAI KANG + {0xAAB2, 0xAAB4, prExtend}, // Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U + {0xAAB7, 0xAAB8, prExtend}, // Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA + {0xAABE, 0xAABF, prExtend}, // Mn [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK + {0xAAC1, 0xAAC1, prExtend}, // Mn TAI VIET TONE MAI THO + {0xAAEB, 0xAAEB, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN II + {0xAAEC, 0xAAED, prExtend}, // Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI + {0xAAEE, 0xAAEF, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU + {0xAAF5, 0xAAF5, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN VISARGA + {0xAAF6, 0xAAF6, prExtend}, // Mn MEETEI MAYEK VIRAMA + {0xABE3, 0xABE4, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP + {0xABE5, 0xABE5, prExtend}, // 
Mn MEETEI MAYEK VOWEL SIGN ANAP + {0xABE6, 0xABE7, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP + {0xABE8, 0xABE8, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN UNAP + {0xABE9, 0xABEA, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG + {0xABEC, 0xABEC, prSpacingMark}, // Mc MEETEI MAYEK LUM IYEK + {0xABED, 0xABED, prExtend}, // Mn MEETEI MAYEK APUN IYEK + {0xAC00, 0xAC00, prLV}, // Lo HANGUL SYLLABLE GA + {0xAC01, 0xAC1B, prLVT}, // Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH + {0xAC1C, 0xAC1C, prLV}, // Lo HANGUL SYLLABLE GAE + {0xAC1D, 0xAC37, prLVT}, // Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH + {0xAC38, 0xAC38, prLV}, // Lo HANGUL SYLLABLE GYA + {0xAC39, 0xAC53, prLVT}, // Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH + {0xAC54, 0xAC54, prLV}, // Lo HANGUL SYLLABLE GYAE + {0xAC55, 0xAC6F, prLVT}, // Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH + {0xAC70, 0xAC70, prLV}, // Lo HANGUL SYLLABLE GEO + {0xAC71, 0xAC8B, prLVT}, // Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH + {0xAC8C, 0xAC8C, prLV}, // Lo HANGUL SYLLABLE GE + {0xAC8D, 0xACA7, prLVT}, // Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH + {0xACA8, 0xACA8, prLV}, // Lo HANGUL SYLLABLE GYEO + {0xACA9, 0xACC3, prLVT}, // Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH + {0xACC4, 0xACC4, prLV}, // Lo HANGUL SYLLABLE GYE + {0xACC5, 0xACDF, prLVT}, // Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH + {0xACE0, 0xACE0, prLV}, // Lo HANGUL SYLLABLE GO + {0xACE1, 0xACFB, prLVT}, // Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH + {0xACFC, 0xACFC, prLV}, // Lo HANGUL SYLLABLE GWA + {0xACFD, 0xAD17, prLVT}, // Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH + {0xAD18, 0xAD18, prLV}, // Lo HANGUL SYLLABLE GWAE + {0xAD19, 0xAD33, prLVT}, // Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH + {0xAD34, 0xAD34, prLV}, // Lo HANGUL SYLLABLE GOE + {0xAD35, 0xAD4F, prLVT}, // Lo [27] HANGUL 
SYLLABLE GOEG..HANGUL SYLLABLE GOEH + {0xAD50, 0xAD50, prLV}, // Lo HANGUL SYLLABLE GYO + {0xAD51, 0xAD6B, prLVT}, // Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH + {0xAD6C, 0xAD6C, prLV}, // Lo HANGUL SYLLABLE GU + {0xAD6D, 0xAD87, prLVT}, // Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH + {0xAD88, 0xAD88, prLV}, // Lo HANGUL SYLLABLE GWEO + {0xAD89, 0xADA3, prLVT}, // Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH + {0xADA4, 0xADA4, prLV}, // Lo HANGUL SYLLABLE GWE + {0xADA5, 0xADBF, prLVT}, // Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH + {0xADC0, 0xADC0, prLV}, // Lo HANGUL SYLLABLE GWI + {0xADC1, 0xADDB, prLVT}, // Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH + {0xADDC, 0xADDC, prLV}, // Lo HANGUL SYLLABLE GYU + {0xADDD, 0xADF7, prLVT}, // Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH + {0xADF8, 0xADF8, prLV}, // Lo HANGUL SYLLABLE GEU + {0xADF9, 0xAE13, prLVT}, // Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH + {0xAE14, 0xAE14, prLV}, // Lo HANGUL SYLLABLE GYI + {0xAE15, 0xAE2F, prLVT}, // Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH + {0xAE30, 0xAE30, prLV}, // Lo HANGUL SYLLABLE GI + {0xAE31, 0xAE4B, prLVT}, // Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH + {0xAE4C, 0xAE4C, prLV}, // Lo HANGUL SYLLABLE GGA + {0xAE4D, 0xAE67, prLVT}, // Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH + {0xAE68, 0xAE68, prLV}, // Lo HANGUL SYLLABLE GGAE + {0xAE69, 0xAE83, prLVT}, // Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE GGAEH + {0xAE84, 0xAE84, prLV}, // Lo HANGUL SYLLABLE GGYA + {0xAE85, 0xAE9F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH + {0xAEA0, 0xAEA0, prLV}, // Lo HANGUL SYLLABLE GGYAE + {0xAEA1, 0xAEBB, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH + {0xAEBC, 0xAEBC, prLV}, // Lo HANGUL SYLLABLE GGEO + {0xAEBD, 0xAED7, prLVT}, // Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH + {0xAED8, 0xAED8, prLV}, // Lo HANGUL SYLLABLE GGE + {0xAED9, 0xAEF3, prLVT}, // Lo [27] 
HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH + {0xAEF4, 0xAEF4, prLV}, // Lo HANGUL SYLLABLE GGYEO + {0xAEF5, 0xAF0F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH + {0xAF10, 0xAF10, prLV}, // Lo HANGUL SYLLABLE GGYE + {0xAF11, 0xAF2B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH + {0xAF2C, 0xAF2C, prLV}, // Lo HANGUL SYLLABLE GGO + {0xAF2D, 0xAF47, prLVT}, // Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH + {0xAF48, 0xAF48, prLV}, // Lo HANGUL SYLLABLE GGWA + {0xAF49, 0xAF63, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH + {0xAF64, 0xAF64, prLV}, // Lo HANGUL SYLLABLE GGWAE + {0xAF65, 0xAF7F, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH + {0xAF80, 0xAF80, prLV}, // Lo HANGUL SYLLABLE GGOE + {0xAF81, 0xAF9B, prLVT}, // Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH + {0xAF9C, 0xAF9C, prLV}, // Lo HANGUL SYLLABLE GGYO + {0xAF9D, 0xAFB7, prLVT}, // Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH + {0xAFB8, 0xAFB8, prLV}, // Lo HANGUL SYLLABLE GGU + {0xAFB9, 0xAFD3, prLVT}, // Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH + {0xAFD4, 0xAFD4, prLV}, // Lo HANGUL SYLLABLE GGWEO + {0xAFD5, 0xAFEF, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH + {0xAFF0, 0xAFF0, prLV}, // Lo HANGUL SYLLABLE GGWE + {0xAFF1, 0xB00B, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH + {0xB00C, 0xB00C, prLV}, // Lo HANGUL SYLLABLE GGWI + {0xB00D, 0xB027, prLVT}, // Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH + {0xB028, 0xB028, prLV}, // Lo HANGUL SYLLABLE GGYU + {0xB029, 0xB043, prLVT}, // Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH + {0xB044, 0xB044, prLV}, // Lo HANGUL SYLLABLE GGEU + {0xB045, 0xB05F, prLVT}, // Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH + {0xB060, 0xB060, prLV}, // Lo HANGUL SYLLABLE GGYI + {0xB061, 0xB07B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH + {0xB07C, 0xB07C, prLV}, // Lo HANGUL SYLLABLE 
GGI + {0xB07D, 0xB097, prLVT}, // Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH + {0xB098, 0xB098, prLV}, // Lo HANGUL SYLLABLE NA + {0xB099, 0xB0B3, prLVT}, // Lo [27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH + {0xB0B4, 0xB0B4, prLV}, // Lo HANGUL SYLLABLE NAE + {0xB0B5, 0xB0CF, prLVT}, // Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH + {0xB0D0, 0xB0D0, prLV}, // Lo HANGUL SYLLABLE NYA + {0xB0D1, 0xB0EB, prLVT}, // Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH + {0xB0EC, 0xB0EC, prLV}, // Lo HANGUL SYLLABLE NYAE + {0xB0ED, 0xB107, prLVT}, // Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH + {0xB108, 0xB108, prLV}, // Lo HANGUL SYLLABLE NEO + {0xB109, 0xB123, prLVT}, // Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH + {0xB124, 0xB124, prLV}, // Lo HANGUL SYLLABLE NE + {0xB125, 0xB13F, prLVT}, // Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH + {0xB140, 0xB140, prLV}, // Lo HANGUL SYLLABLE NYEO + {0xB141, 0xB15B, prLVT}, // Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH + {0xB15C, 0xB15C, prLV}, // Lo HANGUL SYLLABLE NYE + {0xB15D, 0xB177, prLVT}, // Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH + {0xB178, 0xB178, prLV}, // Lo HANGUL SYLLABLE NO + {0xB179, 0xB193, prLVT}, // Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE NOH + {0xB194, 0xB194, prLV}, // Lo HANGUL SYLLABLE NWA + {0xB195, 0xB1AF, prLVT}, // Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH + {0xB1B0, 0xB1B0, prLV}, // Lo HANGUL SYLLABLE NWAE + {0xB1B1, 0xB1CB, prLVT}, // Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH + {0xB1CC, 0xB1CC, prLV}, // Lo HANGUL SYLLABLE NOE + {0xB1CD, 0xB1E7, prLVT}, // Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH + {0xB1E8, 0xB1E8, prLV}, // Lo HANGUL SYLLABLE NYO + {0xB1E9, 0xB203, prLVT}, // Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH + {0xB204, 0xB204, prLV}, // Lo HANGUL SYLLABLE NU + {0xB205, 0xB21F, prLVT}, // Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH + {0xB220, 0xB220, prLV}, // Lo HANGUL SYLLABLE NWEO + 
{0xB221, 0xB23B, prLVT}, // Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH + {0xB23C, 0xB23C, prLV}, // Lo HANGUL SYLLABLE NWE + {0xB23D, 0xB257, prLVT}, // Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH + {0xB258, 0xB258, prLV}, // Lo HANGUL SYLLABLE NWI + {0xB259, 0xB273, prLVT}, // Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH + {0xB274, 0xB274, prLV}, // Lo HANGUL SYLLABLE NYU + {0xB275, 0xB28F, prLVT}, // Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH + {0xB290, 0xB290, prLV}, // Lo HANGUL SYLLABLE NEU + {0xB291, 0xB2AB, prLVT}, // Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH + {0xB2AC, 0xB2AC, prLV}, // Lo HANGUL SYLLABLE NYI + {0xB2AD, 0xB2C7, prLVT}, // Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH + {0xB2C8, 0xB2C8, prLV}, // Lo HANGUL SYLLABLE NI + {0xB2C9, 0xB2E3, prLVT}, // Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH + {0xB2E4, 0xB2E4, prLV}, // Lo HANGUL SYLLABLE DA + {0xB2E5, 0xB2FF, prLVT}, // Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH + {0xB300, 0xB300, prLV}, // Lo HANGUL SYLLABLE DAE + {0xB301, 0xB31B, prLVT}, // Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH + {0xB31C, 0xB31C, prLV}, // Lo HANGUL SYLLABLE DYA + {0xB31D, 0xB337, prLVT}, // Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH + {0xB338, 0xB338, prLV}, // Lo HANGUL SYLLABLE DYAE + {0xB339, 0xB353, prLVT}, // Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH + {0xB354, 0xB354, prLV}, // Lo HANGUL SYLLABLE DEO + {0xB355, 0xB36F, prLVT}, // Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH + {0xB370, 0xB370, prLV}, // Lo HANGUL SYLLABLE DE + {0xB371, 0xB38B, prLVT}, // Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH + {0xB38C, 0xB38C, prLV}, // Lo HANGUL SYLLABLE DYEO + {0xB38D, 0xB3A7, prLVT}, // Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH + {0xB3A8, 0xB3A8, prLV}, // Lo HANGUL SYLLABLE DYE + {0xB3A9, 0xB3C3, prLVT}, // Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH + {0xB3C4, 0xB3C4, prLV}, // Lo HANGUL SYLLABLE DO + 
{0xB3C5, 0xB3DF, prLVT}, // Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH + {0xB3E0, 0xB3E0, prLV}, // Lo HANGUL SYLLABLE DWA + {0xB3E1, 0xB3FB, prLVT}, // Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH + {0xB3FC, 0xB3FC, prLV}, // Lo HANGUL SYLLABLE DWAE + {0xB3FD, 0xB417, prLVT}, // Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH + {0xB418, 0xB418, prLV}, // Lo HANGUL SYLLABLE DOE + {0xB419, 0xB433, prLVT}, // Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH + {0xB434, 0xB434, prLV}, // Lo HANGUL SYLLABLE DYO + {0xB435, 0xB44F, prLVT}, // Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE DYOH + {0xB450, 0xB450, prLV}, // Lo HANGUL SYLLABLE DU + {0xB451, 0xB46B, prLVT}, // Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH + {0xB46C, 0xB46C, prLV}, // Lo HANGUL SYLLABLE DWEO + {0xB46D, 0xB487, prLVT}, // Lo [27] HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH + {0xB488, 0xB488, prLV}, // Lo HANGUL SYLLABLE DWE + {0xB489, 0xB4A3, prLVT}, // Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH + {0xB4A4, 0xB4A4, prLV}, // Lo HANGUL SYLLABLE DWI + {0xB4A5, 0xB4BF, prLVT}, // Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH + {0xB4C0, 0xB4C0, prLV}, // Lo HANGUL SYLLABLE DYU + {0xB4C1, 0xB4DB, prLVT}, // Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH + {0xB4DC, 0xB4DC, prLV}, // Lo HANGUL SYLLABLE DEU + {0xB4DD, 0xB4F7, prLVT}, // Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH + {0xB4F8, 0xB4F8, prLV}, // Lo HANGUL SYLLABLE DYI + {0xB4F9, 0xB513, prLVT}, // Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH + {0xB514, 0xB514, prLV}, // Lo HANGUL SYLLABLE DI + {0xB515, 0xB52F, prLVT}, // Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH + {0xB530, 0xB530, prLV}, // Lo HANGUL SYLLABLE DDA + {0xB531, 0xB54B, prLVT}, // Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH + {0xB54C, 0xB54C, prLV}, // Lo HANGUL SYLLABLE DDAE + {0xB54D, 0xB567, prLVT}, // Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH + {0xB568, 0xB568, prLV}, // Lo HANGUL SYLLABLE DDYA + 
{0xB569, 0xB583, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH + {0xB584, 0xB584, prLV}, // Lo HANGUL SYLLABLE DDYAE + {0xB585, 0xB59F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH + {0xB5A0, 0xB5A0, prLV}, // Lo HANGUL SYLLABLE DDEO + {0xB5A1, 0xB5BB, prLVT}, // Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH + {0xB5BC, 0xB5BC, prLV}, // Lo HANGUL SYLLABLE DDE + {0xB5BD, 0xB5D7, prLVT}, // Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH + {0xB5D8, 0xB5D8, prLV}, // Lo HANGUL SYLLABLE DDYEO + {0xB5D9, 0xB5F3, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH + {0xB5F4, 0xB5F4, prLV}, // Lo HANGUL SYLLABLE DDYE + {0xB5F5, 0xB60F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH + {0xB610, 0xB610, prLV}, // Lo HANGUL SYLLABLE DDO + {0xB611, 0xB62B, prLVT}, // Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH + {0xB62C, 0xB62C, prLV}, // Lo HANGUL SYLLABLE DDWA + {0xB62D, 0xB647, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH + {0xB648, 0xB648, prLV}, // Lo HANGUL SYLLABLE DDWAE + {0xB649, 0xB663, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH + {0xB664, 0xB664, prLV}, // Lo HANGUL SYLLABLE DDOE + {0xB665, 0xB67F, prLVT}, // Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH + {0xB680, 0xB680, prLV}, // Lo HANGUL SYLLABLE DDYO + {0xB681, 0xB69B, prLVT}, // Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH + {0xB69C, 0xB69C, prLV}, // Lo HANGUL SYLLABLE DDU + {0xB69D, 0xB6B7, prLVT}, // Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH + {0xB6B8, 0xB6B8, prLV}, // Lo HANGUL SYLLABLE DDWEO + {0xB6B9, 0xB6D3, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH + {0xB6D4, 0xB6D4, prLV}, // Lo HANGUL SYLLABLE DDWE + {0xB6D5, 0xB6EF, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH + {0xB6F0, 0xB6F0, prLV}, // Lo HANGUL SYLLABLE DDWI + {0xB6F1, 0xB70B, prLVT}, // Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH + {0xB70C, 
0xB70C, prLV}, // Lo HANGUL SYLLABLE DDYU + {0xB70D, 0xB727, prLVT}, // Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH + {0xB728, 0xB728, prLV}, // Lo HANGUL SYLLABLE DDEU + {0xB729, 0xB743, prLVT}, // Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH + {0xB744, 0xB744, prLV}, // Lo HANGUL SYLLABLE DDYI + {0xB745, 0xB75F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE DDYIH + {0xB760, 0xB760, prLV}, // Lo HANGUL SYLLABLE DDI + {0xB761, 0xB77B, prLVT}, // Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH + {0xB77C, 0xB77C, prLV}, // Lo HANGUL SYLLABLE RA + {0xB77D, 0xB797, prLVT}, // Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH + {0xB798, 0xB798, prLV}, // Lo HANGUL SYLLABLE RAE + {0xB799, 0xB7B3, prLVT}, // Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH + {0xB7B4, 0xB7B4, prLV}, // Lo HANGUL SYLLABLE RYA + {0xB7B5, 0xB7CF, prLVT}, // Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH + {0xB7D0, 0xB7D0, prLV}, // Lo HANGUL SYLLABLE RYAE + {0xB7D1, 0xB7EB, prLVT}, // Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH + {0xB7EC, 0xB7EC, prLV}, // Lo HANGUL SYLLABLE REO + {0xB7ED, 0xB807, prLVT}, // Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH + {0xB808, 0xB808, prLV}, // Lo HANGUL SYLLABLE RE + {0xB809, 0xB823, prLVT}, // Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH + {0xB824, 0xB824, prLV}, // Lo HANGUL SYLLABLE RYEO + {0xB825, 0xB83F, prLVT}, // Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH + {0xB840, 0xB840, prLV}, // Lo HANGUL SYLLABLE RYE + {0xB841, 0xB85B, prLVT}, // Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH + {0xB85C, 0xB85C, prLV}, // Lo HANGUL SYLLABLE RO + {0xB85D, 0xB877, prLVT}, // Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH + {0xB878, 0xB878, prLV}, // Lo HANGUL SYLLABLE RWA + {0xB879, 0xB893, prLVT}, // Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH + {0xB894, 0xB894, prLV}, // Lo HANGUL SYLLABLE RWAE + {0xB895, 0xB8AF, prLVT}, // Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH + 
{0xB8B0, 0xB8B0, prLV}, // Lo HANGUL SYLLABLE ROE + {0xB8B1, 0xB8CB, prLVT}, // Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH + {0xB8CC, 0xB8CC, prLV}, // Lo HANGUL SYLLABLE RYO + {0xB8CD, 0xB8E7, prLVT}, // Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH + {0xB8E8, 0xB8E8, prLV}, // Lo HANGUL SYLLABLE RU + {0xB8E9, 0xB903, prLVT}, // Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH + {0xB904, 0xB904, prLV}, // Lo HANGUL SYLLABLE RWEO + {0xB905, 0xB91F, prLVT}, // Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH + {0xB920, 0xB920, prLV}, // Lo HANGUL SYLLABLE RWE + {0xB921, 0xB93B, prLVT}, // Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH + {0xB93C, 0xB93C, prLV}, // Lo HANGUL SYLLABLE RWI + {0xB93D, 0xB957, prLVT}, // Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH + {0xB958, 0xB958, prLV}, // Lo HANGUL SYLLABLE RYU + {0xB959, 0xB973, prLVT}, // Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH + {0xB974, 0xB974, prLV}, // Lo HANGUL SYLLABLE REU + {0xB975, 0xB98F, prLVT}, // Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH + {0xB990, 0xB990, prLV}, // Lo HANGUL SYLLABLE RYI + {0xB991, 0xB9AB, prLVT}, // Lo [27] HANGUL SYLLABLE RYIG..HANGUL SYLLABLE RYIH + {0xB9AC, 0xB9AC, prLV}, // Lo HANGUL SYLLABLE RI + {0xB9AD, 0xB9C7, prLVT}, // Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH + {0xB9C8, 0xB9C8, prLV}, // Lo HANGUL SYLLABLE MA + {0xB9C9, 0xB9E3, prLVT}, // Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH + {0xB9E4, 0xB9E4, prLV}, // Lo HANGUL SYLLABLE MAE + {0xB9E5, 0xB9FF, prLVT}, // Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH + {0xBA00, 0xBA00, prLV}, // Lo HANGUL SYLLABLE MYA + {0xBA01, 0xBA1B, prLVT}, // Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH + {0xBA1C, 0xBA1C, prLV}, // Lo HANGUL SYLLABLE MYAE + {0xBA1D, 0xBA37, prLVT}, // Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH + {0xBA38, 0xBA38, prLV}, // Lo HANGUL SYLLABLE MEO + {0xBA39, 0xBA53, prLVT}, // Lo [27] HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH + 
{0xBA54, 0xBA54, prLV}, // Lo HANGUL SYLLABLE ME + {0xBA55, 0xBA6F, prLVT}, // Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH + {0xBA70, 0xBA70, prLV}, // Lo HANGUL SYLLABLE MYEO + {0xBA71, 0xBA8B, prLVT}, // Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH + {0xBA8C, 0xBA8C, prLV}, // Lo HANGUL SYLLABLE MYE + {0xBA8D, 0xBAA7, prLVT}, // Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH + {0xBAA8, 0xBAA8, prLV}, // Lo HANGUL SYLLABLE MO + {0xBAA9, 0xBAC3, prLVT}, // Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH + {0xBAC4, 0xBAC4, prLV}, // Lo HANGUL SYLLABLE MWA + {0xBAC5, 0xBADF, prLVT}, // Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH + {0xBAE0, 0xBAE0, prLV}, // Lo HANGUL SYLLABLE MWAE + {0xBAE1, 0xBAFB, prLVT}, // Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH + {0xBAFC, 0xBAFC, prLV}, // Lo HANGUL SYLLABLE MOE + {0xBAFD, 0xBB17, prLVT}, // Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH + {0xBB18, 0xBB18, prLV}, // Lo HANGUL SYLLABLE MYO + {0xBB19, 0xBB33, prLVT}, // Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH + {0xBB34, 0xBB34, prLV}, // Lo HANGUL SYLLABLE MU + {0xBB35, 0xBB4F, prLVT}, // Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH + {0xBB50, 0xBB50, prLV}, // Lo HANGUL SYLLABLE MWEO + {0xBB51, 0xBB6B, prLVT}, // Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH + {0xBB6C, 0xBB6C, prLV}, // Lo HANGUL SYLLABLE MWE + {0xBB6D, 0xBB87, prLVT}, // Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH + {0xBB88, 0xBB88, prLV}, // Lo HANGUL SYLLABLE MWI + {0xBB89, 0xBBA3, prLVT}, // Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH + {0xBBA4, 0xBBA4, prLV}, // Lo HANGUL SYLLABLE MYU + {0xBBA5, 0xBBBF, prLVT}, // Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH + {0xBBC0, 0xBBC0, prLV}, // Lo HANGUL SYLLABLE MEU + {0xBBC1, 0xBBDB, prLVT}, // Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH + {0xBBDC, 0xBBDC, prLV}, // Lo HANGUL SYLLABLE MYI + {0xBBDD, 0xBBF7, prLVT}, // Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH + 
{0xBBF8, 0xBBF8, prLV}, // Lo HANGUL SYLLABLE MI + {0xBBF9, 0xBC13, prLVT}, // Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH + {0xBC14, 0xBC14, prLV}, // Lo HANGUL SYLLABLE BA + {0xBC15, 0xBC2F, prLVT}, // Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH + {0xBC30, 0xBC30, prLV}, // Lo HANGUL SYLLABLE BAE + {0xBC31, 0xBC4B, prLVT}, // Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH + {0xBC4C, 0xBC4C, prLV}, // Lo HANGUL SYLLABLE BYA + {0xBC4D, 0xBC67, prLVT}, // Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH + {0xBC68, 0xBC68, prLV}, // Lo HANGUL SYLLABLE BYAE + {0xBC69, 0xBC83, prLVT}, // Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH + {0xBC84, 0xBC84, prLV}, // Lo HANGUL SYLLABLE BEO + {0xBC85, 0xBC9F, prLVT}, // Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH + {0xBCA0, 0xBCA0, prLV}, // Lo HANGUL SYLLABLE BE + {0xBCA1, 0xBCBB, prLVT}, // Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH + {0xBCBC, 0xBCBC, prLV}, // Lo HANGUL SYLLABLE BYEO + {0xBCBD, 0xBCD7, prLVT}, // Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH + {0xBCD8, 0xBCD8, prLV}, // Lo HANGUL SYLLABLE BYE + {0xBCD9, 0xBCF3, prLVT}, // Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH + {0xBCF4, 0xBCF4, prLV}, // Lo HANGUL SYLLABLE BO + {0xBCF5, 0xBD0F, prLVT}, // Lo [27] HANGUL SYLLABLE BOG..HANGUL SYLLABLE BOH + {0xBD10, 0xBD10, prLV}, // Lo HANGUL SYLLABLE BWA + {0xBD11, 0xBD2B, prLVT}, // Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH + {0xBD2C, 0xBD2C, prLV}, // Lo HANGUL SYLLABLE BWAE + {0xBD2D, 0xBD47, prLVT}, // Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH + {0xBD48, 0xBD48, prLV}, // Lo HANGUL SYLLABLE BOE + {0xBD49, 0xBD63, prLVT}, // Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH + {0xBD64, 0xBD64, prLV}, // Lo HANGUL SYLLABLE BYO + {0xBD65, 0xBD7F, prLVT}, // Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH + {0xBD80, 0xBD80, prLV}, // Lo HANGUL SYLLABLE BU + {0xBD81, 0xBD9B, prLVT}, // Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH + {0xBD9C, 
0xBD9C, prLV}, // Lo HANGUL SYLLABLE BWEO + {0xBD9D, 0xBDB7, prLVT}, // Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH + {0xBDB8, 0xBDB8, prLV}, // Lo HANGUL SYLLABLE BWE + {0xBDB9, 0xBDD3, prLVT}, // Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH + {0xBDD4, 0xBDD4, prLV}, // Lo HANGUL SYLLABLE BWI + {0xBDD5, 0xBDEF, prLVT}, // Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH + {0xBDF0, 0xBDF0, prLV}, // Lo HANGUL SYLLABLE BYU + {0xBDF1, 0xBE0B, prLVT}, // Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH + {0xBE0C, 0xBE0C, prLV}, // Lo HANGUL SYLLABLE BEU + {0xBE0D, 0xBE27, prLVT}, // Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH + {0xBE28, 0xBE28, prLV}, // Lo HANGUL SYLLABLE BYI + {0xBE29, 0xBE43, prLVT}, // Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH + {0xBE44, 0xBE44, prLV}, // Lo HANGUL SYLLABLE BI + {0xBE45, 0xBE5F, prLVT}, // Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH + {0xBE60, 0xBE60, prLV}, // Lo HANGUL SYLLABLE BBA + {0xBE61, 0xBE7B, prLVT}, // Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH + {0xBE7C, 0xBE7C, prLV}, // Lo HANGUL SYLLABLE BBAE + {0xBE7D, 0xBE97, prLVT}, // Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH + {0xBE98, 0xBE98, prLV}, // Lo HANGUL SYLLABLE BBYA + {0xBE99, 0xBEB3, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH + {0xBEB4, 0xBEB4, prLV}, // Lo HANGUL SYLLABLE BBYAE + {0xBEB5, 0xBECF, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH + {0xBED0, 0xBED0, prLV}, // Lo HANGUL SYLLABLE BBEO + {0xBED1, 0xBEEB, prLVT}, // Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH + {0xBEEC, 0xBEEC, prLV}, // Lo HANGUL SYLLABLE BBE + {0xBEED, 0xBF07, prLVT}, // Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH + {0xBF08, 0xBF08, prLV}, // Lo HANGUL SYLLABLE BBYEO + {0xBF09, 0xBF23, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH + {0xBF24, 0xBF24, prLV}, // Lo HANGUL SYLLABLE BBYE + {0xBF25, 0xBF3F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEG..HANGUL 
SYLLABLE BBYEH + {0xBF40, 0xBF40, prLV}, // Lo HANGUL SYLLABLE BBO + {0xBF41, 0xBF5B, prLVT}, // Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH + {0xBF5C, 0xBF5C, prLV}, // Lo HANGUL SYLLABLE BBWA + {0xBF5D, 0xBF77, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH + {0xBF78, 0xBF78, prLV}, // Lo HANGUL SYLLABLE BBWAE + {0xBF79, 0xBF93, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH + {0xBF94, 0xBF94, prLV}, // Lo HANGUL SYLLABLE BBOE + {0xBF95, 0xBFAF, prLVT}, // Lo [27] HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH + {0xBFB0, 0xBFB0, prLV}, // Lo HANGUL SYLLABLE BBYO + {0xBFB1, 0xBFCB, prLVT}, // Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH + {0xBFCC, 0xBFCC, prLV}, // Lo HANGUL SYLLABLE BBU + {0xBFCD, 0xBFE7, prLVT}, // Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH + {0xBFE8, 0xBFE8, prLV}, // Lo HANGUL SYLLABLE BBWEO + {0xBFE9, 0xC003, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH + {0xC004, 0xC004, prLV}, // Lo HANGUL SYLLABLE BBWE + {0xC005, 0xC01F, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEG..HANGUL SYLLABLE BBWEH + {0xC020, 0xC020, prLV}, // Lo HANGUL SYLLABLE BBWI + {0xC021, 0xC03B, prLVT}, // Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH + {0xC03C, 0xC03C, prLV}, // Lo HANGUL SYLLABLE BBYU + {0xC03D, 0xC057, prLVT}, // Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH + {0xC058, 0xC058, prLV}, // Lo HANGUL SYLLABLE BBEU + {0xC059, 0xC073, prLVT}, // Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH + {0xC074, 0xC074, prLV}, // Lo HANGUL SYLLABLE BBYI + {0xC075, 0xC08F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH + {0xC090, 0xC090, prLV}, // Lo HANGUL SYLLABLE BBI + {0xC091, 0xC0AB, prLVT}, // Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH + {0xC0AC, 0xC0AC, prLV}, // Lo HANGUL SYLLABLE SA + {0xC0AD, 0xC0C7, prLVT}, // Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH + {0xC0C8, 0xC0C8, prLV}, // Lo HANGUL SYLLABLE SAE + {0xC0C9, 0xC0E3, prLVT}, // Lo [27] 
HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH + {0xC0E4, 0xC0E4, prLV}, // Lo HANGUL SYLLABLE SYA + {0xC0E5, 0xC0FF, prLVT}, // Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH + {0xC100, 0xC100, prLV}, // Lo HANGUL SYLLABLE SYAE + {0xC101, 0xC11B, prLVT}, // Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH + {0xC11C, 0xC11C, prLV}, // Lo HANGUL SYLLABLE SEO + {0xC11D, 0xC137, prLVT}, // Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH + {0xC138, 0xC138, prLV}, // Lo HANGUL SYLLABLE SE + {0xC139, 0xC153, prLVT}, // Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH + {0xC154, 0xC154, prLV}, // Lo HANGUL SYLLABLE SYEO + {0xC155, 0xC16F, prLVT}, // Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH + {0xC170, 0xC170, prLV}, // Lo HANGUL SYLLABLE SYE + {0xC171, 0xC18B, prLVT}, // Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH + {0xC18C, 0xC18C, prLV}, // Lo HANGUL SYLLABLE SO + {0xC18D, 0xC1A7, prLVT}, // Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH + {0xC1A8, 0xC1A8, prLV}, // Lo HANGUL SYLLABLE SWA + {0xC1A9, 0xC1C3, prLVT}, // Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH + {0xC1C4, 0xC1C4, prLV}, // Lo HANGUL SYLLABLE SWAE + {0xC1C5, 0xC1DF, prLVT}, // Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH + {0xC1E0, 0xC1E0, prLV}, // Lo HANGUL SYLLABLE SOE + {0xC1E1, 0xC1FB, prLVT}, // Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH + {0xC1FC, 0xC1FC, prLV}, // Lo HANGUL SYLLABLE SYO + {0xC1FD, 0xC217, prLVT}, // Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH + {0xC218, 0xC218, prLV}, // Lo HANGUL SYLLABLE SU + {0xC219, 0xC233, prLVT}, // Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH + {0xC234, 0xC234, prLV}, // Lo HANGUL SYLLABLE SWEO + {0xC235, 0xC24F, prLVT}, // Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH + {0xC250, 0xC250, prLV}, // Lo HANGUL SYLLABLE SWE + {0xC251, 0xC26B, prLVT}, // Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH + {0xC26C, 0xC26C, prLV}, // Lo HANGUL SYLLABLE SWI + {0xC26D, 0xC287, prLVT}, // Lo [27] 
HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH + {0xC288, 0xC288, prLV}, // Lo HANGUL SYLLABLE SYU + {0xC289, 0xC2A3, prLVT}, // Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH + {0xC2A4, 0xC2A4, prLV}, // Lo HANGUL SYLLABLE SEU + {0xC2A5, 0xC2BF, prLVT}, // Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH + {0xC2C0, 0xC2C0, prLV}, // Lo HANGUL SYLLABLE SYI + {0xC2C1, 0xC2DB, prLVT}, // Lo [27] HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH + {0xC2DC, 0xC2DC, prLV}, // Lo HANGUL SYLLABLE SI + {0xC2DD, 0xC2F7, prLVT}, // Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH + {0xC2F8, 0xC2F8, prLV}, // Lo HANGUL SYLLABLE SSA + {0xC2F9, 0xC313, prLVT}, // Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH + {0xC314, 0xC314, prLV}, // Lo HANGUL SYLLABLE SSAE + {0xC315, 0xC32F, prLVT}, // Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH + {0xC330, 0xC330, prLV}, // Lo HANGUL SYLLABLE SSYA + {0xC331, 0xC34B, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH + {0xC34C, 0xC34C, prLV}, // Lo HANGUL SYLLABLE SSYAE + {0xC34D, 0xC367, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH + {0xC368, 0xC368, prLV}, // Lo HANGUL SYLLABLE SSEO + {0xC369, 0xC383, prLVT}, // Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH + {0xC384, 0xC384, prLV}, // Lo HANGUL SYLLABLE SSE + {0xC385, 0xC39F, prLVT}, // Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH + {0xC3A0, 0xC3A0, prLV}, // Lo HANGUL SYLLABLE SSYEO + {0xC3A1, 0xC3BB, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH + {0xC3BC, 0xC3BC, prLV}, // Lo HANGUL SYLLABLE SSYE + {0xC3BD, 0xC3D7, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH + {0xC3D8, 0xC3D8, prLV}, // Lo HANGUL SYLLABLE SSO + {0xC3D9, 0xC3F3, prLVT}, // Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH + {0xC3F4, 0xC3F4, prLV}, // Lo HANGUL SYLLABLE SSWA + {0xC3F5, 0xC40F, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH + {0xC410, 0xC410, prLV}, // Lo HANGUL SYLLABLE SSWAE + {0xC411, 
0xC42B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH + {0xC42C, 0xC42C, prLV}, // Lo HANGUL SYLLABLE SSOE + {0xC42D, 0xC447, prLVT}, // Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH + {0xC448, 0xC448, prLV}, // Lo HANGUL SYLLABLE SSYO + {0xC449, 0xC463, prLVT}, // Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH + {0xC464, 0xC464, prLV}, // Lo HANGUL SYLLABLE SSU + {0xC465, 0xC47F, prLVT}, // Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH + {0xC480, 0xC480, prLV}, // Lo HANGUL SYLLABLE SSWEO + {0xC481, 0xC49B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH + {0xC49C, 0xC49C, prLV}, // Lo HANGUL SYLLABLE SSWE + {0xC49D, 0xC4B7, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH + {0xC4B8, 0xC4B8, prLV}, // Lo HANGUL SYLLABLE SSWI + {0xC4B9, 0xC4D3, prLVT}, // Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH + {0xC4D4, 0xC4D4, prLV}, // Lo HANGUL SYLLABLE SSYU + {0xC4D5, 0xC4EF, prLVT}, // Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH + {0xC4F0, 0xC4F0, prLV}, // Lo HANGUL SYLLABLE SSEU + {0xC4F1, 0xC50B, prLVT}, // Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH + {0xC50C, 0xC50C, prLV}, // Lo HANGUL SYLLABLE SSYI + {0xC50D, 0xC527, prLVT}, // Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH + {0xC528, 0xC528, prLV}, // Lo HANGUL SYLLABLE SSI + {0xC529, 0xC543, prLVT}, // Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH + {0xC544, 0xC544, prLV}, // Lo HANGUL SYLLABLE A + {0xC545, 0xC55F, prLVT}, // Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH + {0xC560, 0xC560, prLV}, // Lo HANGUL SYLLABLE AE + {0xC561, 0xC57B, prLVT}, // Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH + {0xC57C, 0xC57C, prLV}, // Lo HANGUL SYLLABLE YA + {0xC57D, 0xC597, prLVT}, // Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH + {0xC598, 0xC598, prLV}, // Lo HANGUL SYLLABLE YAE + {0xC599, 0xC5B3, prLVT}, // Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH + {0xC5B4, 0xC5B4, prLV}, // Lo HANGUL SYLLABLE 
EO + {0xC5B5, 0xC5CF, prLVT}, // Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH + {0xC5D0, 0xC5D0, prLV}, // Lo HANGUL SYLLABLE E + {0xC5D1, 0xC5EB, prLVT}, // Lo [27] HANGUL SYLLABLE EG..HANGUL SYLLABLE EH + {0xC5EC, 0xC5EC, prLV}, // Lo HANGUL SYLLABLE YEO + {0xC5ED, 0xC607, prLVT}, // Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH + {0xC608, 0xC608, prLV}, // Lo HANGUL SYLLABLE YE + {0xC609, 0xC623, prLVT}, // Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH + {0xC624, 0xC624, prLV}, // Lo HANGUL SYLLABLE O + {0xC625, 0xC63F, prLVT}, // Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH + {0xC640, 0xC640, prLV}, // Lo HANGUL SYLLABLE WA + {0xC641, 0xC65B, prLVT}, // Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH + {0xC65C, 0xC65C, prLV}, // Lo HANGUL SYLLABLE WAE + {0xC65D, 0xC677, prLVT}, // Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH + {0xC678, 0xC678, prLV}, // Lo HANGUL SYLLABLE OE + {0xC679, 0xC693, prLVT}, // Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH + {0xC694, 0xC694, prLV}, // Lo HANGUL SYLLABLE YO + {0xC695, 0xC6AF, prLVT}, // Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH + {0xC6B0, 0xC6B0, prLV}, // Lo HANGUL SYLLABLE U + {0xC6B1, 0xC6CB, prLVT}, // Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH + {0xC6CC, 0xC6CC, prLV}, // Lo HANGUL SYLLABLE WEO + {0xC6CD, 0xC6E7, prLVT}, // Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH + {0xC6E8, 0xC6E8, prLV}, // Lo HANGUL SYLLABLE WE + {0xC6E9, 0xC703, prLVT}, // Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH + {0xC704, 0xC704, prLV}, // Lo HANGUL SYLLABLE WI + {0xC705, 0xC71F, prLVT}, // Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH + {0xC720, 0xC720, prLV}, // Lo HANGUL SYLLABLE YU + {0xC721, 0xC73B, prLVT}, // Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH + {0xC73C, 0xC73C, prLV}, // Lo HANGUL SYLLABLE EU + {0xC73D, 0xC757, prLVT}, // Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH + {0xC758, 0xC758, prLV}, // Lo HANGUL SYLLABLE YI + {0xC759, 0xC773, prLVT}, // Lo [27] HANGUL 
SYLLABLE YIG..HANGUL SYLLABLE YIH + {0xC774, 0xC774, prLV}, // Lo HANGUL SYLLABLE I + {0xC775, 0xC78F, prLVT}, // Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH + {0xC790, 0xC790, prLV}, // Lo HANGUL SYLLABLE JA + {0xC791, 0xC7AB, prLVT}, // Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH + {0xC7AC, 0xC7AC, prLV}, // Lo HANGUL SYLLABLE JAE + {0xC7AD, 0xC7C7, prLVT}, // Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH + {0xC7C8, 0xC7C8, prLV}, // Lo HANGUL SYLLABLE JYA + {0xC7C9, 0xC7E3, prLVT}, // Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH + {0xC7E4, 0xC7E4, prLV}, // Lo HANGUL SYLLABLE JYAE + {0xC7E5, 0xC7FF, prLVT}, // Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH + {0xC800, 0xC800, prLV}, // Lo HANGUL SYLLABLE JEO + {0xC801, 0xC81B, prLVT}, // Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH + {0xC81C, 0xC81C, prLV}, // Lo HANGUL SYLLABLE JE + {0xC81D, 0xC837, prLVT}, // Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH + {0xC838, 0xC838, prLV}, // Lo HANGUL SYLLABLE JYEO + {0xC839, 0xC853, prLVT}, // Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH + {0xC854, 0xC854, prLV}, // Lo HANGUL SYLLABLE JYE + {0xC855, 0xC86F, prLVT}, // Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH + {0xC870, 0xC870, prLV}, // Lo HANGUL SYLLABLE JO + {0xC871, 0xC88B, prLVT}, // Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH + {0xC88C, 0xC88C, prLV}, // Lo HANGUL SYLLABLE JWA + {0xC88D, 0xC8A7, prLVT}, // Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH + {0xC8A8, 0xC8A8, prLV}, // Lo HANGUL SYLLABLE JWAE + {0xC8A9, 0xC8C3, prLVT}, // Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH + {0xC8C4, 0xC8C4, prLV}, // Lo HANGUL SYLLABLE JOE + {0xC8C5, 0xC8DF, prLVT}, // Lo [27] HANGUL SYLLABLE JOEG..HANGUL SYLLABLE JOEH + {0xC8E0, 0xC8E0, prLV}, // Lo HANGUL SYLLABLE JYO + {0xC8E1, 0xC8FB, prLVT}, // Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH + {0xC8FC, 0xC8FC, prLV}, // Lo HANGUL SYLLABLE JU + {0xC8FD, 0xC917, prLVT}, // Lo [27] HANGUL SYLLABLE 
JUG..HANGUL SYLLABLE JUH + {0xC918, 0xC918, prLV}, // Lo HANGUL SYLLABLE JWEO + {0xC919, 0xC933, prLVT}, // Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH + {0xC934, 0xC934, prLV}, // Lo HANGUL SYLLABLE JWE + {0xC935, 0xC94F, prLVT}, // Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH + {0xC950, 0xC950, prLV}, // Lo HANGUL SYLLABLE JWI + {0xC951, 0xC96B, prLVT}, // Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH + {0xC96C, 0xC96C, prLV}, // Lo HANGUL SYLLABLE JYU + {0xC96D, 0xC987, prLVT}, // Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH + {0xC988, 0xC988, prLV}, // Lo HANGUL SYLLABLE JEU + {0xC989, 0xC9A3, prLVT}, // Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH + {0xC9A4, 0xC9A4, prLV}, // Lo HANGUL SYLLABLE JYI + {0xC9A5, 0xC9BF, prLVT}, // Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH + {0xC9C0, 0xC9C0, prLV}, // Lo HANGUL SYLLABLE JI + {0xC9C1, 0xC9DB, prLVT}, // Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH + {0xC9DC, 0xC9DC, prLV}, // Lo HANGUL SYLLABLE JJA + {0xC9DD, 0xC9F7, prLVT}, // Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH + {0xC9F8, 0xC9F8, prLV}, // Lo HANGUL SYLLABLE JJAE + {0xC9F9, 0xCA13, prLVT}, // Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH + {0xCA14, 0xCA14, prLV}, // Lo HANGUL SYLLABLE JJYA + {0xCA15, 0xCA2F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH + {0xCA30, 0xCA30, prLV}, // Lo HANGUL SYLLABLE JJYAE + {0xCA31, 0xCA4B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH + {0xCA4C, 0xCA4C, prLV}, // Lo HANGUL SYLLABLE JJEO + {0xCA4D, 0xCA67, prLVT}, // Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH + {0xCA68, 0xCA68, prLV}, // Lo HANGUL SYLLABLE JJE + {0xCA69, 0xCA83, prLVT}, // Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH + {0xCA84, 0xCA84, prLV}, // Lo HANGUL SYLLABLE JJYEO + {0xCA85, 0xCA9F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH + {0xCAA0, 0xCAA0, prLV}, // Lo HANGUL SYLLABLE JJYE + {0xCAA1, 0xCABB, prLVT}, // Lo [27] 
HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH + {0xCABC, 0xCABC, prLV}, // Lo HANGUL SYLLABLE JJO + {0xCABD, 0xCAD7, prLVT}, // Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH + {0xCAD8, 0xCAD8, prLV}, // Lo HANGUL SYLLABLE JJWA + {0xCAD9, 0xCAF3, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH + {0xCAF4, 0xCAF4, prLV}, // Lo HANGUL SYLLABLE JJWAE + {0xCAF5, 0xCB0F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH + {0xCB10, 0xCB10, prLV}, // Lo HANGUL SYLLABLE JJOE + {0xCB11, 0xCB2B, prLVT}, // Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH + {0xCB2C, 0xCB2C, prLV}, // Lo HANGUL SYLLABLE JJYO + {0xCB2D, 0xCB47, prLVT}, // Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH + {0xCB48, 0xCB48, prLV}, // Lo HANGUL SYLLABLE JJU + {0xCB49, 0xCB63, prLVT}, // Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH + {0xCB64, 0xCB64, prLV}, // Lo HANGUL SYLLABLE JJWEO + {0xCB65, 0xCB7F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH + {0xCB80, 0xCB80, prLV}, // Lo HANGUL SYLLABLE JJWE + {0xCB81, 0xCB9B, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH + {0xCB9C, 0xCB9C, prLV}, // Lo HANGUL SYLLABLE JJWI + {0xCB9D, 0xCBB7, prLVT}, // Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH + {0xCBB8, 0xCBB8, prLV}, // Lo HANGUL SYLLABLE JJYU + {0xCBB9, 0xCBD3, prLVT}, // Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH + {0xCBD4, 0xCBD4, prLV}, // Lo HANGUL SYLLABLE JJEU + {0xCBD5, 0xCBEF, prLVT}, // Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH + {0xCBF0, 0xCBF0, prLV}, // Lo HANGUL SYLLABLE JJYI + {0xCBF1, 0xCC0B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH + {0xCC0C, 0xCC0C, prLV}, // Lo HANGUL SYLLABLE JJI + {0xCC0D, 0xCC27, prLVT}, // Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH + {0xCC28, 0xCC28, prLV}, // Lo HANGUL SYLLABLE CA + {0xCC29, 0xCC43, prLVT}, // Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH + {0xCC44, 0xCC44, prLV}, // Lo HANGUL SYLLABLE CAE + 
{0xCC45, 0xCC5F, prLVT}, // Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH + {0xCC60, 0xCC60, prLV}, // Lo HANGUL SYLLABLE CYA + {0xCC61, 0xCC7B, prLVT}, // Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH + {0xCC7C, 0xCC7C, prLV}, // Lo HANGUL SYLLABLE CYAE + {0xCC7D, 0xCC97, prLVT}, // Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH + {0xCC98, 0xCC98, prLV}, // Lo HANGUL SYLLABLE CEO + {0xCC99, 0xCCB3, prLVT}, // Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH + {0xCCB4, 0xCCB4, prLV}, // Lo HANGUL SYLLABLE CE + {0xCCB5, 0xCCCF, prLVT}, // Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH + {0xCCD0, 0xCCD0, prLV}, // Lo HANGUL SYLLABLE CYEO + {0xCCD1, 0xCCEB, prLVT}, // Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH + {0xCCEC, 0xCCEC, prLV}, // Lo HANGUL SYLLABLE CYE + {0xCCED, 0xCD07, prLVT}, // Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH + {0xCD08, 0xCD08, prLV}, // Lo HANGUL SYLLABLE CO + {0xCD09, 0xCD23, prLVT}, // Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH + {0xCD24, 0xCD24, prLV}, // Lo HANGUL SYLLABLE CWA + {0xCD25, 0xCD3F, prLVT}, // Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH + {0xCD40, 0xCD40, prLV}, // Lo HANGUL SYLLABLE CWAE + {0xCD41, 0xCD5B, prLVT}, // Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH + {0xCD5C, 0xCD5C, prLV}, // Lo HANGUL SYLLABLE COE + {0xCD5D, 0xCD77, prLVT}, // Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH + {0xCD78, 0xCD78, prLV}, // Lo HANGUL SYLLABLE CYO + {0xCD79, 0xCD93, prLVT}, // Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH + {0xCD94, 0xCD94, prLV}, // Lo HANGUL SYLLABLE CU + {0xCD95, 0xCDAF, prLVT}, // Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH + {0xCDB0, 0xCDB0, prLV}, // Lo HANGUL SYLLABLE CWEO + {0xCDB1, 0xCDCB, prLVT}, // Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH + {0xCDCC, 0xCDCC, prLV}, // Lo HANGUL SYLLABLE CWE + {0xCDCD, 0xCDE7, prLVT}, // Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH + {0xCDE8, 0xCDE8, prLV}, // Lo HANGUL SYLLABLE CWI + 
{0xCDE9, 0xCE03, prLVT}, // Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH + {0xCE04, 0xCE04, prLV}, // Lo HANGUL SYLLABLE CYU + {0xCE05, 0xCE1F, prLVT}, // Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH + {0xCE20, 0xCE20, prLV}, // Lo HANGUL SYLLABLE CEU + {0xCE21, 0xCE3B, prLVT}, // Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH + {0xCE3C, 0xCE3C, prLV}, // Lo HANGUL SYLLABLE CYI + {0xCE3D, 0xCE57, prLVT}, // Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH + {0xCE58, 0xCE58, prLV}, // Lo HANGUL SYLLABLE CI + {0xCE59, 0xCE73, prLVT}, // Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH + {0xCE74, 0xCE74, prLV}, // Lo HANGUL SYLLABLE KA + {0xCE75, 0xCE8F, prLVT}, // Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH + {0xCE90, 0xCE90, prLV}, // Lo HANGUL SYLLABLE KAE + {0xCE91, 0xCEAB, prLVT}, // Lo [27] HANGUL SYLLABLE KAEG..HANGUL SYLLABLE KAEH + {0xCEAC, 0xCEAC, prLV}, // Lo HANGUL SYLLABLE KYA + {0xCEAD, 0xCEC7, prLVT}, // Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH + {0xCEC8, 0xCEC8, prLV}, // Lo HANGUL SYLLABLE KYAE + {0xCEC9, 0xCEE3, prLVT}, // Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH + {0xCEE4, 0xCEE4, prLV}, // Lo HANGUL SYLLABLE KEO + {0xCEE5, 0xCEFF, prLVT}, // Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH + {0xCF00, 0xCF00, prLV}, // Lo HANGUL SYLLABLE KE + {0xCF01, 0xCF1B, prLVT}, // Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH + {0xCF1C, 0xCF1C, prLV}, // Lo HANGUL SYLLABLE KYEO + {0xCF1D, 0xCF37, prLVT}, // Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH + {0xCF38, 0xCF38, prLV}, // Lo HANGUL SYLLABLE KYE + {0xCF39, 0xCF53, prLVT}, // Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH + {0xCF54, 0xCF54, prLV}, // Lo HANGUL SYLLABLE KO + {0xCF55, 0xCF6F, prLVT}, // Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH + {0xCF70, 0xCF70, prLV}, // Lo HANGUL SYLLABLE KWA + {0xCF71, 0xCF8B, prLVT}, // Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH + {0xCF8C, 0xCF8C, prLV}, // Lo HANGUL SYLLABLE KWAE + {0xCF8D, 
0xCFA7, prLVT}, // Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH + {0xCFA8, 0xCFA8, prLV}, // Lo HANGUL SYLLABLE KOE + {0xCFA9, 0xCFC3, prLVT}, // Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH + {0xCFC4, 0xCFC4, prLV}, // Lo HANGUL SYLLABLE KYO + {0xCFC5, 0xCFDF, prLVT}, // Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH + {0xCFE0, 0xCFE0, prLV}, // Lo HANGUL SYLLABLE KU + {0xCFE1, 0xCFFB, prLVT}, // Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH + {0xCFFC, 0xCFFC, prLV}, // Lo HANGUL SYLLABLE KWEO + {0xCFFD, 0xD017, prLVT}, // Lo [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH + {0xD018, 0xD018, prLV}, // Lo HANGUL SYLLABLE KWE + {0xD019, 0xD033, prLVT}, // Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH + {0xD034, 0xD034, prLV}, // Lo HANGUL SYLLABLE KWI + {0xD035, 0xD04F, prLVT}, // Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH + {0xD050, 0xD050, prLV}, // Lo HANGUL SYLLABLE KYU + {0xD051, 0xD06B, prLVT}, // Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH + {0xD06C, 0xD06C, prLV}, // Lo HANGUL SYLLABLE KEU + {0xD06D, 0xD087, prLVT}, // Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH + {0xD088, 0xD088, prLV}, // Lo HANGUL SYLLABLE KYI + {0xD089, 0xD0A3, prLVT}, // Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH + {0xD0A4, 0xD0A4, prLV}, // Lo HANGUL SYLLABLE KI + {0xD0A5, 0xD0BF, prLVT}, // Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH + {0xD0C0, 0xD0C0, prLV}, // Lo HANGUL SYLLABLE TA + {0xD0C1, 0xD0DB, prLVT}, // Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH + {0xD0DC, 0xD0DC, prLV}, // Lo HANGUL SYLLABLE TAE + {0xD0DD, 0xD0F7, prLVT}, // Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH + {0xD0F8, 0xD0F8, prLV}, // Lo HANGUL SYLLABLE TYA + {0xD0F9, 0xD113, prLVT}, // Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH + {0xD114, 0xD114, prLV}, // Lo HANGUL SYLLABLE TYAE + {0xD115, 0xD12F, prLVT}, // Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH + {0xD130, 0xD130, prLV}, // Lo HANGUL SYLLABLE TEO + {0xD131, 
0xD14B, prLVT}, // Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH + {0xD14C, 0xD14C, prLV}, // Lo HANGUL SYLLABLE TE + {0xD14D, 0xD167, prLVT}, // Lo [27] HANGUL SYLLABLE TEG..HANGUL SYLLABLE TEH + {0xD168, 0xD168, prLV}, // Lo HANGUL SYLLABLE TYEO + {0xD169, 0xD183, prLVT}, // Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH + {0xD184, 0xD184, prLV}, // Lo HANGUL SYLLABLE TYE + {0xD185, 0xD19F, prLVT}, // Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH + {0xD1A0, 0xD1A0, prLV}, // Lo HANGUL SYLLABLE TO + {0xD1A1, 0xD1BB, prLVT}, // Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH + {0xD1BC, 0xD1BC, prLV}, // Lo HANGUL SYLLABLE TWA + {0xD1BD, 0xD1D7, prLVT}, // Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH + {0xD1D8, 0xD1D8, prLV}, // Lo HANGUL SYLLABLE TWAE + {0xD1D9, 0xD1F3, prLVT}, // Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH + {0xD1F4, 0xD1F4, prLV}, // Lo HANGUL SYLLABLE TOE + {0xD1F5, 0xD20F, prLVT}, // Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH + {0xD210, 0xD210, prLV}, // Lo HANGUL SYLLABLE TYO + {0xD211, 0xD22B, prLVT}, // Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH + {0xD22C, 0xD22C, prLV}, // Lo HANGUL SYLLABLE TU + {0xD22D, 0xD247, prLVT}, // Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH + {0xD248, 0xD248, prLV}, // Lo HANGUL SYLLABLE TWEO + {0xD249, 0xD263, prLVT}, // Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH + {0xD264, 0xD264, prLV}, // Lo HANGUL SYLLABLE TWE + {0xD265, 0xD27F, prLVT}, // Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH + {0xD280, 0xD280, prLV}, // Lo HANGUL SYLLABLE TWI + {0xD281, 0xD29B, prLVT}, // Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH + {0xD29C, 0xD29C, prLV}, // Lo HANGUL SYLLABLE TYU + {0xD29D, 0xD2B7, prLVT}, // Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH + {0xD2B8, 0xD2B8, prLV}, // Lo HANGUL SYLLABLE TEU + {0xD2B9, 0xD2D3, prLVT}, // Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH + {0xD2D4, 0xD2D4, prLV}, // Lo HANGUL SYLLABLE TYI + {0xD2D5, 
0xD2EF, prLVT}, // Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH + {0xD2F0, 0xD2F0, prLV}, // Lo HANGUL SYLLABLE TI + {0xD2F1, 0xD30B, prLVT}, // Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH + {0xD30C, 0xD30C, prLV}, // Lo HANGUL SYLLABLE PA + {0xD30D, 0xD327, prLVT}, // Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH + {0xD328, 0xD328, prLV}, // Lo HANGUL SYLLABLE PAE + {0xD329, 0xD343, prLVT}, // Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH + {0xD344, 0xD344, prLV}, // Lo HANGUL SYLLABLE PYA + {0xD345, 0xD35F, prLVT}, // Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH + {0xD360, 0xD360, prLV}, // Lo HANGUL SYLLABLE PYAE + {0xD361, 0xD37B, prLVT}, // Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH + {0xD37C, 0xD37C, prLV}, // Lo HANGUL SYLLABLE PEO + {0xD37D, 0xD397, prLVT}, // Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH + {0xD398, 0xD398, prLV}, // Lo HANGUL SYLLABLE PE + {0xD399, 0xD3B3, prLVT}, // Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH + {0xD3B4, 0xD3B4, prLV}, // Lo HANGUL SYLLABLE PYEO + {0xD3B5, 0xD3CF, prLVT}, // Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH + {0xD3D0, 0xD3D0, prLV}, // Lo HANGUL SYLLABLE PYE + {0xD3D1, 0xD3EB, prLVT}, // Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH + {0xD3EC, 0xD3EC, prLV}, // Lo HANGUL SYLLABLE PO + {0xD3ED, 0xD407, prLVT}, // Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH + {0xD408, 0xD408, prLV}, // Lo HANGUL SYLLABLE PWA + {0xD409, 0xD423, prLVT}, // Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH + {0xD424, 0xD424, prLV}, // Lo HANGUL SYLLABLE PWAE + {0xD425, 0xD43F, prLVT}, // Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH + {0xD440, 0xD440, prLV}, // Lo HANGUL SYLLABLE POE + {0xD441, 0xD45B, prLVT}, // Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH + {0xD45C, 0xD45C, prLV}, // Lo HANGUL SYLLABLE PYO + {0xD45D, 0xD477, prLVT}, // Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH + {0xD478, 0xD478, prLV}, // Lo HANGUL SYLLABLE PU + {0xD479, 0xD493, 
prLVT}, // Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH + {0xD494, 0xD494, prLV}, // Lo HANGUL SYLLABLE PWEO + {0xD495, 0xD4AF, prLVT}, // Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH + {0xD4B0, 0xD4B0, prLV}, // Lo HANGUL SYLLABLE PWE + {0xD4B1, 0xD4CB, prLVT}, // Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH + {0xD4CC, 0xD4CC, prLV}, // Lo HANGUL SYLLABLE PWI + {0xD4CD, 0xD4E7, prLVT}, // Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH + {0xD4E8, 0xD4E8, prLV}, // Lo HANGUL SYLLABLE PYU + {0xD4E9, 0xD503, prLVT}, // Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH + {0xD504, 0xD504, prLV}, // Lo HANGUL SYLLABLE PEU + {0xD505, 0xD51F, prLVT}, // Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH + {0xD520, 0xD520, prLV}, // Lo HANGUL SYLLABLE PYI + {0xD521, 0xD53B, prLVT}, // Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH + {0xD53C, 0xD53C, prLV}, // Lo HANGUL SYLLABLE PI + {0xD53D, 0xD557, prLVT}, // Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH + {0xD558, 0xD558, prLV}, // Lo HANGUL SYLLABLE HA + {0xD559, 0xD573, prLVT}, // Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH + {0xD574, 0xD574, prLV}, // Lo HANGUL SYLLABLE HAE + {0xD575, 0xD58F, prLVT}, // Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH + {0xD590, 0xD590, prLV}, // Lo HANGUL SYLLABLE HYA + {0xD591, 0xD5AB, prLVT}, // Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH + {0xD5AC, 0xD5AC, prLV}, // Lo HANGUL SYLLABLE HYAE + {0xD5AD, 0xD5C7, prLVT}, // Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH + {0xD5C8, 0xD5C8, prLV}, // Lo HANGUL SYLLABLE HEO + {0xD5C9, 0xD5E3, prLVT}, // Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH + {0xD5E4, 0xD5E4, prLV}, // Lo HANGUL SYLLABLE HE + {0xD5E5, 0xD5FF, prLVT}, // Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH + {0xD600, 0xD600, prLV}, // Lo HANGUL SYLLABLE HYEO + {0xD601, 0xD61B, prLVT}, // Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH + {0xD61C, 0xD61C, prLV}, // Lo HANGUL SYLLABLE HYE + {0xD61D, 0xD637, 
prLVT}, // Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH + {0xD638, 0xD638, prLV}, // Lo HANGUL SYLLABLE HO + {0xD639, 0xD653, prLVT}, // Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH + {0xD654, 0xD654, prLV}, // Lo HANGUL SYLLABLE HWA + {0xD655, 0xD66F, prLVT}, // Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH + {0xD670, 0xD670, prLV}, // Lo HANGUL SYLLABLE HWAE + {0xD671, 0xD68B, prLVT}, // Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH + {0xD68C, 0xD68C, prLV}, // Lo HANGUL SYLLABLE HOE + {0xD68D, 0xD6A7, prLVT}, // Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH + {0xD6A8, 0xD6A8, prLV}, // Lo HANGUL SYLLABLE HYO + {0xD6A9, 0xD6C3, prLVT}, // Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH + {0xD6C4, 0xD6C4, prLV}, // Lo HANGUL SYLLABLE HU + {0xD6C5, 0xD6DF, prLVT}, // Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH + {0xD6E0, 0xD6E0, prLV}, // Lo HANGUL SYLLABLE HWEO + {0xD6E1, 0xD6FB, prLVT}, // Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH + {0xD6FC, 0xD6FC, prLV}, // Lo HANGUL SYLLABLE HWE + {0xD6FD, 0xD717, prLVT}, // Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH + {0xD718, 0xD718, prLV}, // Lo HANGUL SYLLABLE HWI + {0xD719, 0xD733, prLVT}, // Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH + {0xD734, 0xD734, prLV}, // Lo HANGUL SYLLABLE HYU + {0xD735, 0xD74F, prLVT}, // Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH + {0xD750, 0xD750, prLV}, // Lo HANGUL SYLLABLE HEU + {0xD751, 0xD76B, prLVT}, // Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH + {0xD76C, 0xD76C, prLV}, // Lo HANGUL SYLLABLE HYI + {0xD76D, 0xD787, prLVT}, // Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH + {0xD788, 0xD788, prLV}, // Lo HANGUL SYLLABLE HI + {0xD789, 0xD7A3, prLVT}, // Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH + {0xD7B0, 0xD7C6, prV}, // Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E + {0xD7CB, 0xD7FB, prT}, // Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH + {0xFB1E, 0xFB1E, 
prExtend}, // Mn HEBREW POINT JUDEO-SPANISH VARIKA + {0xFE00, 0xFE0F, prExtend}, // Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 + {0xFE20, 0xFE2F, prExtend}, // Mn [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF + {0xFEFF, 0xFEFF, prControl}, // Cf ZERO WIDTH NO-BREAK SPACE + {0xFF9E, 0xFF9F, prExtend}, // Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK + {0xFFF0, 0xFFF8, prControl}, // Cn [9] .. + {0xFFF9, 0xFFFB, prControl}, // Cf [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR + {0x101FD, 0x101FD, prExtend}, // Mn PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE + {0x102E0, 0x102E0, prExtend}, // Mn COPTIC EPACT THOUSANDS MARK + {0x10376, 0x1037A, prExtend}, // Mn [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII + {0x10A01, 0x10A03, prExtend}, // Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R + {0x10A05, 0x10A06, prExtend}, // Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O + {0x10A0C, 0x10A0F, prExtend}, // Mn [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA + {0x10A38, 0x10A3A, prExtend}, // Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW + {0x10A3F, 0x10A3F, prExtend}, // Mn KHAROSHTHI VIRAMA + {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW + {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI + {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW + {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU + {0x11001, 0x11001, prExtend}, // Mn BRAHMI SIGN ANUSVARA + {0x11002, 0x11002, prSpacingMark}, // Mc BRAHMI SIGN VISARGA + {0x11038, 0x11046, prExtend}, // Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA + {0x1107F, 0x11081, prExtend}, // Mn [3] BRAHMI NUMBER JOINER..KAITHI SIGN ANUSVARA + {0x11082, 0x11082, prSpacingMark}, // Mc KAITHI SIGN VISARGA 
+ {0x110B0, 0x110B2, prSpacingMark}, // Mc [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II + {0x110B3, 0x110B6, prExtend}, // Mn [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI + {0x110B7, 0x110B8, prSpacingMark}, // Mc [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU + {0x110B9, 0x110BA, prExtend}, // Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA + {0x110BD, 0x110BD, prPreprend}, // Cf KAITHI NUMBER SIGN + {0x110CD, 0x110CD, prPreprend}, // Cf KAITHI NUMBER SIGN ABOVE + {0x11100, 0x11102, prExtend}, // Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA + {0x11127, 0x1112B, prExtend}, // Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU + {0x1112C, 0x1112C, prSpacingMark}, // Mc CHAKMA VOWEL SIGN E + {0x1112D, 0x11134, prExtend}, // Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA + {0x11145, 0x11146, prSpacingMark}, // Mc [2] CHAKMA VOWEL SIGN AA..CHAKMA VOWEL SIGN EI + {0x11173, 0x11173, prExtend}, // Mn MAHAJANI SIGN NUKTA + {0x11180, 0x11181, prExtend}, // Mn [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA + {0x11182, 0x11182, prSpacingMark}, // Mc SHARADA SIGN VISARGA + {0x111B3, 0x111B5, prSpacingMark}, // Mc [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II + {0x111B6, 0x111BE, prExtend}, // Mn [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O + {0x111BF, 0x111C0, prSpacingMark}, // Mc [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA + {0x111C2, 0x111C3, prPreprend}, // Lo [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA + {0x111C9, 0x111CC, prExtend}, // Mn [4] SHARADA SANDHI MARK..SHARADA EXTRA SHORT VOWEL MARK + {0x1122C, 0x1122E, prSpacingMark}, // Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II + {0x1122F, 0x11231, prExtend}, // Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI + {0x11232, 0x11233, prSpacingMark}, // Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU + {0x11234, 0x11234, prExtend}, // Mn KHOJKI SIGN ANUSVARA + {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA + {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN 
SHADDA + {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA + {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II + {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA + {0x11300, 0x11301, prExtend}, // Mn [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU + {0x11302, 0x11303, prSpacingMark}, // Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA + {0x1133B, 0x1133C, prExtend}, // Mn [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA + {0x1133E, 0x1133E, prExtend}, // Mc GRANTHA VOWEL SIGN AA + {0x1133F, 0x1133F, prSpacingMark}, // Mc GRANTHA VOWEL SIGN I + {0x11340, 0x11340, prExtend}, // Mn GRANTHA VOWEL SIGN II + {0x11341, 0x11344, prSpacingMark}, // Mc [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR + {0x11347, 0x11348, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI + {0x1134B, 0x1134D, prSpacingMark}, // Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA + {0x11357, 0x11357, prExtend}, // Mc GRANTHA AU LENGTH MARK + {0x11362, 0x11363, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL + {0x11366, 0x1136C, prExtend}, // Mn [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX + {0x11370, 0x11374, prExtend}, // Mn [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA + {0x11435, 0x11437, prSpacingMark}, // Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II + {0x11438, 0x1143F, prExtend}, // Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI + {0x11440, 0x11441, prSpacingMark}, // Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU + {0x11442, 0x11444, prExtend}, // Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA + {0x11445, 0x11445, prSpacingMark}, // Mc NEWA SIGN VISARGA + {0x11446, 0x11446, prExtend}, // Mn NEWA SIGN NUKTA + {0x1145E, 0x1145E, prExtend}, // Mn NEWA SANDHI MARK + {0x114B0, 0x114B0, prExtend}, // Mc TIRHUTA VOWEL SIGN AA + {0x114B1, 
0x114B2, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN I..TIRHUTA VOWEL SIGN II + {0x114B3, 0x114B8, prExtend}, // Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL + {0x114B9, 0x114B9, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN E + {0x114BA, 0x114BA, prExtend}, // Mn TIRHUTA VOWEL SIGN SHORT E + {0x114BB, 0x114BC, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN O + {0x114BD, 0x114BD, prExtend}, // Mc TIRHUTA VOWEL SIGN SHORT O + {0x114BE, 0x114BE, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN AU + {0x114BF, 0x114C0, prExtend}, // Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA + {0x114C1, 0x114C1, prSpacingMark}, // Mc TIRHUTA SIGN VISARGA + {0x114C2, 0x114C3, prExtend}, // Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA + {0x115AF, 0x115AF, prExtend}, // Mc SIDDHAM VOWEL SIGN AA + {0x115B0, 0x115B1, prSpacingMark}, // Mc [2] SIDDHAM VOWEL SIGN I..SIDDHAM VOWEL SIGN II + {0x115B2, 0x115B5, prExtend}, // Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR + {0x115B8, 0x115BB, prSpacingMark}, // Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU + {0x115BC, 0x115BD, prExtend}, // Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM SIGN ANUSVARA + {0x115BE, 0x115BE, prSpacingMark}, // Mc SIDDHAM SIGN VISARGA + {0x115BF, 0x115C0, prExtend}, // Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA + {0x115DC, 0x115DD, prExtend}, // Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU + {0x11630, 0x11632, prSpacingMark}, // Mc [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II + {0x11633, 0x1163A, prExtend}, // Mn [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI + {0x1163B, 0x1163C, prSpacingMark}, // Mc [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU + {0x1163D, 0x1163D, prExtend}, // Mn MODI SIGN ANUSVARA + {0x1163E, 0x1163E, prSpacingMark}, // Mc MODI SIGN VISARGA + {0x1163F, 0x11640, prExtend}, // Mn [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA + {0x116AB, 0x116AB, prExtend}, // Mn TAKRI SIGN ANUSVARA + {0x116AC, 0x116AC, prSpacingMark}, // Mc TAKRI SIGN 
VISARGA + {0x116AD, 0x116AD, prExtend}, // Mn TAKRI VOWEL SIGN AA + {0x116AE, 0x116AF, prSpacingMark}, // Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II + {0x116B0, 0x116B5, prExtend}, // Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU + {0x116B6, 0x116B6, prSpacingMark}, // Mc TAKRI SIGN VIRAMA + {0x116B7, 0x116B7, prExtend}, // Mn TAKRI SIGN NUKTA + {0x1171D, 0x1171F, prExtend}, // Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA + {0x11720, 0x11721, prSpacingMark}, // Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA + {0x11722, 0x11725, prExtend}, // Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU + {0x11726, 0x11726, prSpacingMark}, // Mc AHOM VOWEL SIGN E + {0x11727, 0x1172B, prExtend}, // Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER + {0x1182C, 0x1182E, prSpacingMark}, // Mc [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II + {0x1182F, 0x11837, prExtend}, // Mn [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA + {0x11838, 0x11838, prSpacingMark}, // Mc DOGRA SIGN VISARGA + {0x11839, 0x1183A, prExtend}, // Mn [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA + {0x119D1, 0x119D3, prSpacingMark}, // Mc [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II + {0x119D4, 0x119D7, prExtend}, // Mn [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR + {0x119DA, 0x119DB, prExtend}, // Mn [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI + {0x119DC, 0x119DF, prSpacingMark}, // Mc [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA + {0x119E0, 0x119E0, prExtend}, // Mn NANDINAGARI SIGN VIRAMA + {0x119E4, 0x119E4, prSpacingMark}, // Mc NANDINAGARI VOWEL SIGN PRISHTHAMATRA E + {0x11A01, 0x11A0A, prExtend}, // Mn [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK + {0x11A33, 0x11A38, prExtend}, // Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA + {0x11A39, 0x11A39, prSpacingMark}, // Mc ZANABAZAR SQUARE SIGN VISARGA + {0x11A3A, 0x11A3A, prPreprend}, // Lo ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA + 
{0x11A3B, 0x11A3E, prExtend}, // Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA + {0x11A47, 0x11A47, prExtend}, // Mn ZANABAZAR SQUARE SUBJOINER + {0x11A51, 0x11A56, prExtend}, // Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE + {0x11A57, 0x11A58, prSpacingMark}, // Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU + {0x11A59, 0x11A5B, prExtend}, // Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK + {0x11A84, 0x11A89, prPreprend}, // Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOMBO CLUSTER-INITIAL LETTER SA + {0x11A8A, 0x11A96, prExtend}, // Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA + {0x11A97, 0x11A97, prSpacingMark}, // Mc SOYOMBO SIGN VISARGA + {0x11A98, 0x11A99, prExtend}, // Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER + {0x11C2F, 0x11C2F, prSpacingMark}, // Mc BHAIKSUKI VOWEL SIGN AA + {0x11C30, 0x11C36, prExtend}, // Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L + {0x11C38, 0x11C3D, prExtend}, // Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA + {0x11C3E, 0x11C3E, prSpacingMark}, // Mc BHAIKSUKI SIGN VISARGA + {0x11C3F, 0x11C3F, prExtend}, // Mn BHAIKSUKI SIGN VIRAMA + {0x11C92, 0x11CA7, prExtend}, // Mn [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA + {0x11CA9, 0x11CA9, prSpacingMark}, // Mc MARCHEN SUBJOINED LETTER YA + {0x11CAA, 0x11CB0, prExtend}, // Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA + {0x11CB1, 0x11CB1, prSpacingMark}, // Mc MARCHEN VOWEL SIGN I + {0x11CB2, 0x11CB3, prExtend}, // Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E + {0x11CB4, 0x11CB4, prSpacingMark}, // Mc MARCHEN VOWEL SIGN O + {0x11CB5, 0x11CB6, prExtend}, // Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU + {0x11D31, 0x11D36, prExtend}, // Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R + {0x11D3A, 0x11D3A, prExtend}, // Mn MASARAM GONDI VOWEL SIGN E + {0x11D3C, 0x11D3D, prExtend}, // Mn [2] MASARAM GONDI VOWEL SIGN 
AI..MASARAM GONDI VOWEL SIGN O + {0x11D3F, 0x11D45, prExtend}, // Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA + {0x11D46, 0x11D46, prPreprend}, // Lo MASARAM GONDI REPHA + {0x11D47, 0x11D47, prExtend}, // Mn MASARAM GONDI RA-KARA + {0x11D8A, 0x11D8E, prSpacingMark}, // Mc [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU + {0x11D90, 0x11D91, prExtend}, // Mn [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI + {0x11D93, 0x11D94, prSpacingMark}, // Mc [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU + {0x11D95, 0x11D95, prExtend}, // Mn GUNJALA GONDI SIGN ANUSVARA + {0x11D96, 0x11D96, prSpacingMark}, // Mc GUNJALA GONDI SIGN VISARGA + {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA + {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U + {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O + {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE + {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM + {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR + {0x16F51, 0x16F87, prSpacingMark}, // Mc [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI + {0x16F8F, 0x16F92, prExtend}, // Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW + {0x1BC9D, 0x1BC9E, prExtend}, // Mn [2] DUPLOYAN THICK LETTER SELECTOR..DUPLOYAN DOUBLE MARK + {0x1BCA0, 0x1BCA3, prControl}, // Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP + {0x1D165, 0x1D165, prExtend}, // Mc MUSICAL SYMBOL COMBINING STEM + {0x1D166, 0x1D166, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING SPRECHGESANG STEM + {0x1D167, 0x1D169, prExtend}, // Mn [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3 + {0x1D16D, 0x1D16D, prSpacingMark}, // Mc MUSICAL SYMBOL 
COMBINING AUGMENTATION DOT + {0x1D16E, 0x1D172, prExtend}, // Mc [5] MUSICAL SYMBOL COMBINING FLAG-1..MUSICAL SYMBOL COMBINING FLAG-5 + {0x1D173, 0x1D17A, prControl}, // Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE + {0x1D17B, 0x1D182, prExtend}, // Mn [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE + {0x1D185, 0x1D18B, prExtend}, // Mn [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE + {0x1D1AA, 0x1D1AD, prExtend}, // Mn [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO + {0x1D242, 0x1D244, prExtend}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME + {0x1DA00, 0x1DA36, prExtend}, // Mn [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN + {0x1DA3B, 0x1DA6C, prExtend}, // Mn [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT + {0x1DA75, 0x1DA75, prExtend}, // Mn SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS + {0x1DA84, 0x1DA84, prExtend}, // Mn SIGNWRITING LOCATION HEAD NECK + {0x1DA9B, 0x1DA9F, prExtend}, // Mn [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6 + {0x1DAA1, 0x1DAAF, prExtend}, // Mn [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16 + {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE + {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU + {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI + {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS + {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D + {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI + {0x1E8D0, 0x1E8D6, prExtend}, // Mn 
[7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS + {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA + {0x1F000, 0x1F02B, prExtendedPictographic}, // 5.1 [44] (🀀..🀫) MAHJONG TILE EAST WIND..MAHJONG TILE BACK + {0x1F02C, 0x1F02F, prExtendedPictographic}, // NA [4] (🀬..🀯) .. + {0x1F030, 0x1F093, prExtendedPictographic}, // 5.1[100] (🀰..🂓) DOMINO TILE HORIZONTAL BACK..DOMINO TILE VERTICAL-06-06 + {0x1F094, 0x1F09F, prExtendedPictographic}, // NA [12] (🂔..🂟) .. + {0x1F0A0, 0x1F0AE, prExtendedPictographic}, // 6.0 [15] (🂠..🂮) PLAYING CARD BACK..PLAYING CARD KING OF SPADES + {0x1F0AF, 0x1F0B0, prExtendedPictographic}, // NA [2] (🂯..🂰) .. + {0x1F0B1, 0x1F0BE, prExtendedPictographic}, // 6.0 [14] (🂱..🂾) PLAYING CARD ACE OF HEARTS..PLAYING CARD KING OF HEARTS + {0x1F0BF, 0x1F0BF, prExtendedPictographic}, // 7.0 [1] (🂿) PLAYING CARD RED JOKER + {0x1F0C0, 0x1F0C0, prExtendedPictographic}, // NA [1] (🃀) + {0x1F0C1, 0x1F0CF, prExtendedPictographic}, // 6.0 [15] (🃁..🃏) PLAYING CARD ACE OF DIAMONDS..joker + {0x1F0D0, 0x1F0D0, prExtendedPictographic}, // NA [1] (🃐) + {0x1F0D1, 0x1F0DF, prExtendedPictographic}, // 6.0 [15] (🃑..🃟) PLAYING CARD ACE OF CLUBS..PLAYING CARD WHITE JOKER + {0x1F0E0, 0x1F0F5, prExtendedPictographic}, // 7.0 [22] (🃠..🃵) PLAYING CARD FOOL..PLAYING CARD TRUMP-21 + {0x1F0F6, 0x1F0FF, prExtendedPictographic}, // NA [10] (🃶..🃿) .. + {0x1F10D, 0x1F10F, prExtendedPictographic}, // NA [3] (🄍..🄏) .. + {0x1F12F, 0x1F12F, prExtendedPictographic}, // 11.0 [1] (🄯) COPYLEFT SYMBOL + {0x1F16C, 0x1F16C, prExtendedPictographic}, // 12.0 [1] (🅬) RAISED MR SIGN + {0x1F16D, 0x1F16F, prExtendedPictographic}, // NA [3] (🅭..🅯) .. 
+ {0x1F170, 0x1F171, prExtendedPictographic}, // 6.0 [2] (🅰️..🅱️) A button (blood type)..B button (blood type) + {0x1F17E, 0x1F17E, prExtendedPictographic}, // 6.0 [1] (🅾️) O button (blood type) + {0x1F17F, 0x1F17F, prExtendedPictographic}, // 5.2 [1] (🅿️) P button + {0x1F18E, 0x1F18E, prExtendedPictographic}, // 6.0 [1] (🆎) AB button (blood type) + {0x1F191, 0x1F19A, prExtendedPictographic}, // 6.0 [10] (🆑..🆚) CL button..VS button + {0x1F1AD, 0x1F1E5, prExtendedPictographic}, // NA [57] (🆭..🇥) .. + {0x1F1E6, 0x1F1FF, prRegionalIndicator}, // So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z + {0x1F201, 0x1F202, prExtendedPictographic}, // 6.0 [2] (🈁..🈂️) Japanese “here” button..Japanese “service charge” button + {0x1F203, 0x1F20F, prExtendedPictographic}, // NA [13] (🈃..🈏) .. + {0x1F21A, 0x1F21A, prExtendedPictographic}, // 5.2 [1] (🈚) Japanese “free of charge” button + {0x1F22F, 0x1F22F, prExtendedPictographic}, // 5.2 [1] (🈯) Japanese “reserved” button + {0x1F232, 0x1F23A, prExtendedPictographic}, // 6.0 [9] (🈲..🈺) Japanese “prohibited” button..Japanese “open for business” button + {0x1F23C, 0x1F23F, prExtendedPictographic}, // NA [4] (🈼..🈿) .. + {0x1F249, 0x1F24F, prExtendedPictographic}, // NA [7] (🉉..🉏) .. + {0x1F250, 0x1F251, prExtendedPictographic}, // 6.0 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button + {0x1F252, 0x1F25F, prExtendedPictographic}, // NA [14] (🉒..🉟) .. + {0x1F260, 0x1F265, prExtendedPictographic}, // 10.0 [6] (🉠..🉥) ROUNDED SYMBOL FOR FU..ROUNDED SYMBOL FOR CAI + {0x1F266, 0x1F2FF, prExtendedPictographic}, // NA[154] (🉦..🋿) .. 
+ {0x1F300, 0x1F320, prExtendedPictographic}, // 6.0 [33] (🌀..🌠) cyclone..shooting star + {0x1F321, 0x1F32C, prExtendedPictographic}, // 7.0 [12] (🌡️..🌬️) thermometer..wind face + {0x1F32D, 0x1F32F, prExtendedPictographic}, // 8.0 [3] (🌭..🌯) hot dog..burrito + {0x1F330, 0x1F335, prExtendedPictographic}, // 6.0 [6] (🌰..🌵) chestnut..cactus + {0x1F336, 0x1F336, prExtendedPictographic}, // 7.0 [1] (🌶️) hot pepper + {0x1F337, 0x1F37C, prExtendedPictographic}, // 6.0 [70] (🌷..🍼) tulip..baby bottle + {0x1F37D, 0x1F37D, prExtendedPictographic}, // 7.0 [1] (🍽️) fork and knife with plate + {0x1F37E, 0x1F37F, prExtendedPictographic}, // 8.0 [2] (🍾..🍿) bottle with popping cork..popcorn + {0x1F380, 0x1F393, prExtendedPictographic}, // 6.0 [20] (🎀..🎓) ribbon..graduation cap + {0x1F394, 0x1F39F, prExtendedPictographic}, // 7.0 [12] (🎔..🎟️) HEART WITH TIP ON THE LEFT..admission tickets + {0x1F3A0, 0x1F3C4, prExtendedPictographic}, // 6.0 [37] (🎠..🏄) carousel horse..person surfing + {0x1F3C5, 0x1F3C5, prExtendedPictographic}, // 7.0 [1] (🏅) sports medal + {0x1F3C6, 0x1F3CA, prExtendedPictographic}, // 6.0 [5] (🏆..🏊) trophy..person swimming + {0x1F3CB, 0x1F3CE, prExtendedPictographic}, // 7.0 [4] (🏋️..🏎️) person lifting weights..racing car + {0x1F3CF, 0x1F3D3, prExtendedPictographic}, // 8.0 [5] (🏏..🏓) cricket game..ping pong + {0x1F3D4, 0x1F3DF, prExtendedPictographic}, // 7.0 [12] (🏔️..🏟️) snow-capped mountain..stadium + {0x1F3E0, 0x1F3F0, prExtendedPictographic}, // 6.0 [17] (🏠..🏰) house..castle + {0x1F3F1, 0x1F3F7, prExtendedPictographic}, // 7.0 [7] (🏱..🏷️) WHITE PENNANT..label + {0x1F3F8, 0x1F3FA, prExtendedPictographic}, // 8.0 [3] (🏸..🏺) badminton..amphora + {0x1F3FB, 0x1F3FF, prExtend}, // Sk [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6 + {0x1F400, 0x1F43E, prExtendedPictographic}, // 6.0 [63] (🐀..🐾) rat..paw prints + {0x1F43F, 0x1F43F, prExtendedPictographic}, // 7.0 [1] (🐿️) chipmunk + {0x1F440, 0x1F440, prExtendedPictographic}, // 6.0 [1] 
(👀) eyes + {0x1F441, 0x1F441, prExtendedPictographic}, // 7.0 [1] (👁️) eye + {0x1F442, 0x1F4F7, prExtendedPictographic}, // 6.0[182] (👂..📷) ear..camera + {0x1F4F8, 0x1F4F8, prExtendedPictographic}, // 7.0 [1] (📸) camera with flash + {0x1F4F9, 0x1F4FC, prExtendedPictographic}, // 6.0 [4] (📹..📼) video camera..videocassette + {0x1F4FD, 0x1F4FE, prExtendedPictographic}, // 7.0 [2] (📽️..📾) film projector..PORTABLE STEREO + {0x1F4FF, 0x1F4FF, prExtendedPictographic}, // 8.0 [1] (📿) prayer beads + {0x1F500, 0x1F53D, prExtendedPictographic}, // 6.0 [62] (🔀..🔽) shuffle tracks button..downwards button + {0x1F546, 0x1F54A, prExtendedPictographic}, // 7.0 [5] (🕆..🕊️) WHITE LATIN CROSS..dove + {0x1F54B, 0x1F54F, prExtendedPictographic}, // 8.0 [5] (🕋..🕏) kaaba..BOWL OF HYGIEIA + {0x1F550, 0x1F567, prExtendedPictographic}, // 6.0 [24] (🕐..🕧) one o’clock..twelve-thirty + {0x1F568, 0x1F579, prExtendedPictographic}, // 7.0 [18] (🕨..🕹️) RIGHT SPEAKER..joystick + {0x1F57A, 0x1F57A, prExtendedPictographic}, // 9.0 [1] (🕺) man dancing + {0x1F57B, 0x1F5A3, prExtendedPictographic}, // 7.0 [41] (🕻..🖣) LEFT HAND TELEPHONE RECEIVER..BLACK DOWN POINTING BACKHAND INDEX + {0x1F5A4, 0x1F5A4, prExtendedPictographic}, // 9.0 [1] (🖤) black heart + {0x1F5A5, 0x1F5FA, prExtendedPictographic}, // 7.0 [86] (🖥️..🗺️) desktop computer..world map + {0x1F5FB, 0x1F5FF, prExtendedPictographic}, // 6.0 [5] (🗻..🗿) mount fuji..moai + {0x1F600, 0x1F600, prExtendedPictographic}, // 6.1 [1] (😀) grinning face + {0x1F601, 0x1F610, prExtendedPictographic}, // 6.0 [16] (😁..😐) beaming face with smiling eyes..neutral face + {0x1F611, 0x1F611, prExtendedPictographic}, // 6.1 [1] (😑) expressionless face + {0x1F612, 0x1F614, prExtendedPictographic}, // 6.0 [3] (😒..😔) unamused face..pensive face + {0x1F615, 0x1F615, prExtendedPictographic}, // 6.1 [1] (😕) confused face + {0x1F616, 0x1F616, prExtendedPictographic}, // 6.0 [1] (😖) confounded face + {0x1F617, 0x1F617, prExtendedPictographic}, // 6.1 [1] (😗) kissing face + 
{0x1F618, 0x1F618, prExtendedPictographic}, // 6.0 [1] (😘) face blowing a kiss + {0x1F619, 0x1F619, prExtendedPictographic}, // 6.1 [1] (😙) kissing face with smiling eyes + {0x1F61A, 0x1F61A, prExtendedPictographic}, // 6.0 [1] (😚) kissing face with closed eyes + {0x1F61B, 0x1F61B, prExtendedPictographic}, // 6.1 [1] (😛) face with tongue + {0x1F61C, 0x1F61E, prExtendedPictographic}, // 6.0 [3] (😜..😞) winking face with tongue..disappointed face + {0x1F61F, 0x1F61F, prExtendedPictographic}, // 6.1 [1] (😟) worried face + {0x1F620, 0x1F625, prExtendedPictographic}, // 6.0 [6] (😠..😥) angry face..sad but relieved face + {0x1F626, 0x1F627, prExtendedPictographic}, // 6.1 [2] (😦..😧) frowning face with open mouth..anguished face + {0x1F628, 0x1F62B, prExtendedPictographic}, // 6.0 [4] (😨..😫) fearful face..tired face + {0x1F62C, 0x1F62C, prExtendedPictographic}, // 6.1 [1] (😬) grimacing face + {0x1F62D, 0x1F62D, prExtendedPictographic}, // 6.0 [1] (😭) loudly crying face + {0x1F62E, 0x1F62F, prExtendedPictographic}, // 6.1 [2] (😮..😯) face with open mouth..hushed face + {0x1F630, 0x1F633, prExtendedPictographic}, // 6.0 [4] (😰..😳) anxious face with sweat..flushed face + {0x1F634, 0x1F634, prExtendedPictographic}, // 6.1 [1] (😴) sleeping face + {0x1F635, 0x1F640, prExtendedPictographic}, // 6.0 [12] (😵..🙀) dizzy face..weary cat + {0x1F641, 0x1F642, prExtendedPictographic}, // 7.0 [2] (🙁..🙂) slightly frowning face..slightly smiling face + {0x1F643, 0x1F644, prExtendedPictographic}, // 8.0 [2] (🙃..🙄) upside-down face..face with rolling eyes + {0x1F645, 0x1F64F, prExtendedPictographic}, // 6.0 [11] (🙅..🙏) person gesturing NO..folded hands + {0x1F680, 0x1F6C5, prExtendedPictographic}, // 6.0 [70] (🚀..🛅) rocket..left luggage + {0x1F6C6, 0x1F6CF, prExtendedPictographic}, // 7.0 [10] (🛆..🛏️) TRIANGLE WITH ROUNDED CORNERS..bed + {0x1F6D0, 0x1F6D0, prExtendedPictographic}, // 8.0 [1] (🛐) place of worship + {0x1F6D1, 0x1F6D2, prExtendedPictographic}, // 9.0 [2] (🛑..🛒) stop sign..shopping 
cart + {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // 10.0 [2] (🛓..🛔) STUPA..PAGODA + {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // 12.0 [1] (🛕) hindu temple + {0x1F6D6, 0x1F6DF, prExtendedPictographic}, // NA [10] (🛖..🛟) .. + {0x1F6E0, 0x1F6EC, prExtendedPictographic}, // 7.0 [13] (🛠️..🛬) hammer and wrench..airplane arrival + {0x1F6ED, 0x1F6EF, prExtendedPictographic}, // NA [3] (🛭..🛯) .. + {0x1F6F0, 0x1F6F3, prExtendedPictographic}, // 7.0 [4] (🛰️..🛳️) satellite..passenger ship + {0x1F6F4, 0x1F6F6, prExtendedPictographic}, // 9.0 [3] (🛴..🛶) kick scooter..canoe + {0x1F6F7, 0x1F6F8, prExtendedPictographic}, // 10.0 [2] (🛷..🛸) sled..flying saucer + {0x1F6F9, 0x1F6F9, prExtendedPictographic}, // 11.0 [1] (🛹) skateboard + {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // 12.0 [1] (🛺) auto rickshaw + {0x1F6FB, 0x1F6FF, prExtendedPictographic}, // NA [5] (🛻..🛿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // NA [12] (🝴..🝿) .. + {0x1F7D5, 0x1F7D8, prExtendedPictographic}, // 11.0 [4] (🟕..🟘) CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE + {0x1F7D9, 0x1F7DF, prExtendedPictographic}, // NA [7] (🟙..🟟) .. + {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // 12.0 [12] (🟠..🟫) orange circle..brown square + {0x1F7EC, 0x1F7FF, prExtendedPictographic}, // NA [20] (🟬..🟿) .. + {0x1F80C, 0x1F80F, prExtendedPictographic}, // NA [4] (🠌..🠏) .. + {0x1F848, 0x1F84F, prExtendedPictographic}, // NA [8] (🡈..🡏) .. + {0x1F85A, 0x1F85F, prExtendedPictographic}, // NA [6] (🡚..🡟) .. + {0x1F888, 0x1F88F, prExtendedPictographic}, // NA [8] (🢈..🢏) .. + {0x1F8AE, 0x1F8FF, prExtendedPictographic}, // NA [82] (🢮..🣿) .. 
+ {0x1F90C, 0x1F90C, prExtendedPictographic}, // NA [1] (🤌) + {0x1F90D, 0x1F90F, prExtendedPictographic}, // 12.0 [3] (🤍..🤏) white heart..pinching hand + {0x1F910, 0x1F918, prExtendedPictographic}, // 8.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns + {0x1F919, 0x1F91E, prExtendedPictographic}, // 9.0 [6] (🤙..🤞) call me hand..crossed fingers + {0x1F91F, 0x1F91F, prExtendedPictographic}, // 10.0 [1] (🤟) love-you gesture + {0x1F920, 0x1F927, prExtendedPictographic}, // 9.0 [8] (🤠..🤧) cowboy hat face..sneezing face + {0x1F928, 0x1F92F, prExtendedPictographic}, // 10.0 [8] (🤨..🤯) face with raised eyebrow..exploding head + {0x1F930, 0x1F930, prExtendedPictographic}, // 9.0 [1] (🤰) pregnant woman + {0x1F931, 0x1F932, prExtendedPictographic}, // 10.0 [2] (🤱..🤲) breast-feeding..palms up together + {0x1F933, 0x1F93A, prExtendedPictographic}, // 9.0 [8] (🤳..🤺) selfie..person fencing + {0x1F93C, 0x1F93E, prExtendedPictographic}, // 9.0 [3] (🤼..🤾) people wrestling..person playing handball + {0x1F93F, 0x1F93F, prExtendedPictographic}, // 12.0 [1] (🤿) diving mask + {0x1F940, 0x1F945, prExtendedPictographic}, // 9.0 [6] (🥀..🥅) wilted flower..goal net + {0x1F947, 0x1F94B, prExtendedPictographic}, // 9.0 [5] (🥇..🥋) 1st place medal..martial arts uniform + {0x1F94C, 0x1F94C, prExtendedPictographic}, // 10.0 [1] (🥌) curling stone + {0x1F94D, 0x1F94F, prExtendedPictographic}, // 11.0 [3] (🥍..🥏) lacrosse..flying disc + {0x1F950, 0x1F95E, prExtendedPictographic}, // 9.0 [15] (🥐..🥞) croissant..pancakes + {0x1F95F, 0x1F96B, prExtendedPictographic}, // 10.0 [13] (🥟..🥫) dumpling..canned food + {0x1F96C, 0x1F970, prExtendedPictographic}, // 11.0 [5] (🥬..🥰) leafy green..smiling face with hearts + {0x1F971, 0x1F971, prExtendedPictographic}, // 12.0 [1] (🥱) yawning face + {0x1F972, 0x1F972, prExtendedPictographic}, // NA [1] (🥲) + {0x1F973, 0x1F976, prExtendedPictographic}, // 11.0 [4] (🥳..🥶) partying face..cold face + {0x1F977, 0x1F979, prExtendedPictographic}, // NA [3] (🥷..🥹) .. 
+ {0x1F97A, 0x1F97A, prExtendedPictographic}, // 11.0 [1] (🥺) pleading face + {0x1F97B, 0x1F97B, prExtendedPictographic}, // 12.0 [1] (🥻) sari + {0x1F97C, 0x1F97F, prExtendedPictographic}, // 11.0 [4] (🥼..🥿) lab coat..flat shoe + {0x1F980, 0x1F984, prExtendedPictographic}, // 8.0 [5] (🦀..🦄) crab..unicorn + {0x1F985, 0x1F991, prExtendedPictographic}, // 9.0 [13] (🦅..🦑) eagle..squid + {0x1F992, 0x1F997, prExtendedPictographic}, // 10.0 [6] (🦒..🦗) giraffe..cricket + {0x1F998, 0x1F9A2, prExtendedPictographic}, // 11.0 [11] (🦘..🦢) kangaroo..swan + {0x1F9A3, 0x1F9A4, prExtendedPictographic}, // NA [2] (🦣..🦤) .. + {0x1F9A5, 0x1F9AA, prExtendedPictographic}, // 12.0 [6] (🦥..🦪) sloth..oyster + {0x1F9AB, 0x1F9AD, prExtendedPictographic}, // NA [3] (🦫..🦭) .. + {0x1F9AE, 0x1F9AF, prExtendedPictographic}, // 12.0 [2] (🦮..🦯) guide dog..probing cane + {0x1F9B0, 0x1F9B9, prExtendedPictographic}, // 11.0 [10] (🦰..🦹) red hair..supervillain + {0x1F9BA, 0x1F9BF, prExtendedPictographic}, // 12.0 [6] (🦺..🦿) safety vest..mechanical leg + {0x1F9C0, 0x1F9C0, prExtendedPictographic}, // 8.0 [1] (🧀) cheese wedge + {0x1F9C1, 0x1F9C2, prExtendedPictographic}, // 11.0 [2] (🧁..🧂) cupcake..salt + {0x1F9C3, 0x1F9CA, prExtendedPictographic}, // 12.0 [8] (🧃..🧊) beverage box..ice cube + {0x1F9CB, 0x1F9CC, prExtendedPictographic}, // NA [2] (🧋..🧌) .. + {0x1F9CD, 0x1F9CF, prExtendedPictographic}, // 12.0 [3] (🧍..🧏) person standing..deaf person + {0x1F9D0, 0x1F9E6, prExtendedPictographic}, // 10.0 [23] (🧐..🧦) face with monocle..socks + {0x1F9E7, 0x1F9FF, prExtendedPictographic}, // 11.0 [25] (🧧..🧿) red envelope..nazar amulet + {0x1FA00, 0x1FA53, prExtendedPictographic}, // 12.0 [84] (🨀..🩓) NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP + {0x1FA54, 0x1FA5F, prExtendedPictographic}, // NA [12] (🩔..🩟) .. + {0x1FA60, 0x1FA6D, prExtendedPictographic}, // 11.0 [14] (🩠..🩭) XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER + {0x1FA6E, 0x1FA6F, prExtendedPictographic}, // NA [2] (🩮..🩯) .. 
+ {0x1FA70, 0x1FA73, prExtendedPictographic}, // 12.0 [4] (🩰..🩳) ballet shoes..shorts + {0x1FA74, 0x1FA77, prExtendedPictographic}, // NA [4] (🩴..🩷) .. + {0x1FA78, 0x1FA7A, prExtendedPictographic}, // 12.0 [3] (🩸..🩺) drop of blood..stethoscope + {0x1FA7B, 0x1FA7F, prExtendedPictographic}, // NA [5] (🩻..🩿) .. + {0x1FA80, 0x1FA82, prExtendedPictographic}, // 12.0 [3] (🪀..🪂) yo-yo..parachute + {0x1FA83, 0x1FA8F, prExtendedPictographic}, // NA [13] (🪃..🪏) .. + {0x1FA90, 0x1FA95, prExtendedPictographic}, // 12.0 [6] (🪐..🪕) ringed planet..banjo + {0x1FA96, 0x1FFFD, prExtendedPictographic}, // NA[1384] (🪖..🿽) .. + {0xE0000, 0xE0000, prControl}, // Cn + {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG + {0xE0002, 0xE001F, prControl}, // Cn [30] .. + {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG + {0xE0080, 0xE00FF, prControl}, // Cn [128] .. + {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 + {0xE01F0, 0xE0FFF, prControl}, // Cn [3600] .. +} + +// property returns the Unicode property value (see constants above) of the +// given code point. +func property(r rune) int { + // Run a binary search. + from := 0 + to := len(codePoints) + for to > from { + middle := (from + to) / 2 + cpRange := codePoints[middle] + if int(r) < cpRange[0] { + to = middle + continue + } + if int(r) > cpRange[1] { + from = middle + 1 + continue + } + return cpRange[2] + } + return prAny +} diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE new file mode 100644 index 0000000..49ea0f9 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go new file mode 100644 index 0000000..a8edafb --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go security_windows.go psapi_windows.go symlink_windows.go diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go new file mode 100644 index 0000000..b138e65 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/psapi_windows.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr +} + +//sys GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) = psapi.GetProcessMemoryInfo diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go new file mode 100644 index 0000000..7c6ad8f --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/reparse_windows.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +const ( + FSCTL_SET_REPARSE_POINT = 0x000900A4 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + + SYMLINK_FLAG_RELATIVE = 1 +) + +// These structures are described +// in https://msdn.microsoft.com/en-us/library/cc232007.aspx +// and https://msdn.microsoft.com/en-us/library/cc232006.aspx. 
+ +// REPARSE_DATA_BUFFER_HEADER is a common part of REPARSE_DATA_BUFFER structure. +type REPARSE_DATA_BUFFER_HEADER struct { + ReparseTag uint32 + // The size, in bytes, of the reparse data that follows + // the common portion of the REPARSE_DATA_BUFFER element. + // This value is the length of the data starting at the + // SubstituteNameOffset field. + ReparseDataLength uint16 + Reserved uint16 +} + +type SymbolicLinkReparseBuffer struct { + // The integer that contains the offset, in bytes, + // of the substitute name string in the PathBuffer array, + // computed as an offset from byte 0 of PathBuffer. Note that + // this offset must be divided by 2 to get the array index. + SubstituteNameOffset uint16 + // The integer that contains the length, in bytes, of the + // substitute name string. If this string is null-terminated, + // SubstituteNameLength does not include the Unicode null character. + SubstituteNameLength uint16 + // PrintNameOffset is similar to SubstituteNameOffset. + PrintNameOffset uint16 + // PrintNameLength is similar to SubstituteNameLength. + PrintNameLength uint16 + // Flags specifies whether the substitute name is a full path name or + // a path name relative to the directory containing the symbolic link. + Flags uint32 + PathBuffer [1]uint16 +} + +type MountPointReparseBuffer struct { + // The integer that contains the offset, in bytes, + // of the substitute name string in the PathBuffer array, + // computed as an offset from byte 0 of PathBuffer. Note that + // this offset must be divided by 2 to get the array index. + SubstituteNameOffset uint16 + // The integer that contains the length, in bytes, of the + // substitute name string. If this string is null-terminated, + // SubstituteNameLength does not include the Unicode null character. + SubstituteNameLength uint16 + // PrintNameOffset is similar to SubstituteNameOffset. + PrintNameOffset uint16 + // PrintNameLength is similar to SubstituteNameLength. 
+ PrintNameLength uint16 + PathBuffer [1]uint16 +} diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go new file mode 100644 index 0000000..4a2dfc0 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/security_windows.go @@ -0,0 +1,128 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" +) + +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf + +const ( + TOKEN_ADJUST_PRIVILEGES = 0x0020 + SE_PRIVILEGE_ENABLED = 0x00000002 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUID_AND_ATTRIBUTES struct { + Luid LUID + Attributes uint32 +} + +type TOKEN_PRIVILEGES struct { + PrivilegeCount uint32 + Privileges [1]LUID_AND_ATTRIBUTES +} + +//sys OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) = advapi32.OpenThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) [true] = advapi32.AdjustTokenPrivileges + +func AdjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) error { + ret, err := adjustTokenPrivileges(token, disableAllPrivileges, newstate, buflen, prevstate, returnlen) + if ret == 0 { + // 
AdjustTokenPrivileges call failed + return err + } + // AdjustTokenPrivileges call succeeded + if err == syscall.EINVAL { + // GetLastError returned ERROR_SUCCESS + return nil + } + return err +} + +//sys DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) = advapi32.DuplicateTokenEx +//sys SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) = advapi32.SetTokenInformation + +type SID_AND_ATTRIBUTES struct { + Sid *syscall.SID + Attributes uint32 +} + +type TOKEN_MANDATORY_LABEL struct { + Label SID_AND_ATTRIBUTES +} + +func (tml *TOKEN_MANDATORY_LABEL) Size() uint32 { + return uint32(unsafe.Sizeof(TOKEN_MANDATORY_LABEL{})) + syscall.GetLengthSid(tml.Label.Sid) +} + +const SE_GROUP_INTEGRITY = 0x00000020 + +type TokenType uint32 + +const ( + TokenPrimary TokenType = 1 + TokenImpersonation TokenType = 2 +) + +//sys GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) = userenv.GetProfilesDirectoryW + +const ( + LG_INCLUDE_INDIRECT = 0x1 + MAX_PREFERRED_LENGTH = 0xFFFFFFFF +) + +type LocalGroupUserInfo0 struct { + Name *uint16 +} + +type UserInfo4 struct { + Name *uint16 + Password *uint16 + PasswordAge uint32 + Priv uint32 + HomeDir *uint16 + Comment *uint16 + Flags uint32 + ScriptPath *uint16 + AuthFlags uint32 + FullName *uint16 + UsrComment *uint16 + Parms *uint16 + Workstations *uint16 + LastLogon uint32 + LastLogoff uint32 + AcctExpires uint32 + MaxStorage uint32 + UnitsPerWeek uint32 + LogonHours *byte + BadPwCount uint32 + NumLogons uint32 + LogonServer *uint16 + CountryCode uint32 + CodePage uint32 + UserSid *syscall.SID + PrimaryGroupID uint32 + Profile *uint16 + HomeDirDrive *uint16 + PasswordExpired uint32 +} + +//sys NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, 
buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) = netapi32.NetUserGetLocalGroups diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go new file mode 100644 index 0000000..b64d058 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/symlink_windows.go @@ -0,0 +1,39 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + ERROR_INVALID_PARAMETER syscall.Errno = 87 + + // symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972) + SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2 + + // FileInformationClass values + FileBasicInfo = 0 // FILE_BASIC_INFO + FileStandardInfo = 1 // FILE_STANDARD_INFO + FileNameInfo = 2 // FILE_NAME_INFO + FileStreamInfo = 7 // FILE_STREAM_INFO + FileCompressionInfo = 8 // FILE_COMPRESSION_INFO + FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO + FileIdBothDirectoryInfo = 0xa // FILE_ID_BOTH_DIR_INFO + FileIdBothDirectoryRestartInfo = 0xb // FILE_ID_BOTH_DIR_INFO + FileRemoteProtocolInfo = 0xd // FILE_REMOTE_PROTOCOL_INFO + FileFullDirectoryInfo = 0xe // FILE_FULL_DIR_INFO + FileFullDirectoryRestartInfo = 0xf // FILE_FULL_DIR_INFO + FileStorageInfo = 0x10 // FILE_STORAGE_INFO + FileAlignmentInfo = 0x11 // FILE_ALIGNMENT_INFO + FileIdInfo = 0x12 // FILE_ID_INFO + FileIdExtdDirectoryInfo = 0x13 // FILE_ID_EXTD_DIR_INFO + FileIdExtdDirectoryRestartInfo = 0x14 // FILE_ID_EXTD_DIR_INFO +) + +type FILE_ATTRIBUTE_TAG_INFO struct { + FileAttributes uint32 + ReparseTag uint32 +} + +//sys GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) diff --git 
a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go new file mode 100644 index 0000000..121132f --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/syscall_windows.go @@ -0,0 +1,307 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "syscall" + "unsafe" +) + +const ( + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + 
Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + /* more fields might be present here. */ +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) = kernel32.GetModuleFileNameW + +const ( + WSA_FLAG_OVERLAPPED = 0x01 + WSA_FLAG_NO_HANDLE_INHERIT = 0x80 + + WSAEMSGSIZE syscall.Errno = 10040 + + MSG_PEEK = 0x2 + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + + socket_error = uintptr(^uint32(0)) +) + +var WSAID_WSASENDMSG = syscall.GUID{ + Data1: 0xa441e712, + Data2: 0x754f, + Data3: 0x43ca, + Data4: [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = syscall.GUID{ + Data1: 0xf689d7c8, + Data2: 0x6f1f, + Data3: 0x436b, + Data4: [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +var 
sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *syscall.WSABuf + BufferCount uint32 + Control syscall.WSABuf + Flags uint32 +} + +//sys WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = ws2_32.WSASocketW + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s syscall.Handle + s, sendRecvMsgFunc.err = syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer syscall.CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = syscall.WSAIoctl(s, + syscall.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = syscall.WSAIoctl(s, + syscall.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd syscall.Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *syscall.Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func 
WSARecvMsg(fd syscall.Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *syscall.Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 + + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +func Rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +//sys LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.LockFileEx +//sys UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.UnlockFileEx + +const ( + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 +) + +const MB_ERR_INVALID_CHARS = 8 + +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys GetConsoleCP() (ccp uint32) = kernel32.GetConsoleCP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar 
int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys GetCurrentThread() (pseudoHandle syscall.Handle, err error) = kernel32.GetCurrentThread + +const STYPE_DISKTREE = 0x00 + +type SHARE_INFO_2 struct { + Netname *uint16 + Type uint32 + Remark *uint16 + Permissions uint32 + MaxUses uint32 + CurrentUses uint32 + Path *uint16 + Passwd *uint16 +} + +//sys NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) = netapi32.NetShareAdd +//sys NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) = netapi32.NetShareDel + +const ( + FILE_NAME_NORMALIZED = 0x0 + FILE_NAME_OPENED = 0x8 + + VOLUME_NAME_DOS = 0x0 + VOLUME_NAME_GUID = 0x1 + VOLUME_NAME_NONE = 0x4 + VOLUME_NAME_NT = 0x2 +) + +//sys GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW + +func LoadGetFinalPathNameByHandle() error { + return procGetFinalPathNameByHandleW.Find() +} diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go new file mode 100644 index 0000000..4e0018f --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll/sysdll.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sysdll is an internal leaf package that records and reports +// which Windows DLL names are used by Go itself. These DLLs are then +// only loaded from the System32 directory. See Issue 14959. +package sysdll + +// IsSystemDLL reports whether the named dll key (a base name, like +// "foo.dll") is a system DLL which should only be loaded from the +// Windows SYSTEM32 directory. 
+// +// Filenames are case sensitive, but that doesn't matter because +// the case registered with Add is also the same case used with +// LoadDLL later. +// +// It has no associated mutex and should only be mutated serially +// (currently: during init), and not concurrent with DLL loading. +var IsSystemDLL = map[string]bool{} + +// Add notes that dll is a system32 DLL which should only be loaded +// from the Windows SYSTEM32 directory. It returns its argument back, +// for ease of use in generated code. +func Add(dll string) string { + IsSystemDLL[dll] = true + return dll +} diff --git a/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go new file mode 100644 index 0000000..3ed2d9f --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/internal/syscall/windows/zsyscall_windows.go @@ -0,0 +1,363 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package windows + +import ( + "syscall" + "unsafe" + + "github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modiphlpapi = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll")) + modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll")) + modws2_32 = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll")) + modnetapi32 = syscall.NewLazyDLL(sysdll.Add("netapi32.dll")) + modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll")) + moduserenv = syscall.NewLazyDLL(sysdll.Add("userenv.dll")) + modpsapi = syscall.NewLazyDLL(sysdll.Add("psapi.dll")) + + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procWSASocketW = modws2_32.NewProc("WSASocketW") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procGetACP = modkernel32.NewProc("GetACP") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procNetShareAdd = modnetapi32.NewProc("NetShareAdd") + procNetShareDel = modnetapi32.NewProc("NetShareDel") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") + procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") 
+ procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") +) + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nameformat), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(fn)), uintptr(len)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protinfo)), uintptr(group), uintptr(flags)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func 
LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetConsoleCP() (ccp uint32) { + r0, _, _ := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + ccp = uint32(r0) + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentThread() (pseudoHandle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + pseudoHandle = syscall.Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) { + r0, _, _ := 
syscall.Syscall6(procNetShareAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parmErr)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetShareDel.Addr(), 3, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(netName)), uintptr(reserved)) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) { + var _p0 uint32 + if openasself { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(h), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := 
syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + ret = uint32(r0) + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(hExistingToken), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpTokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(phNewToken))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(tokenHandle), uintptr(tokenInformationClass), uintptr(tokenInformation), uintptr(tokenInformationLength), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall(procGetProfilesDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserGetLocalGroups.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(flags), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(memCounters)), uintptr(cb)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(info)), uintptr(bufsize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go new file mode 100644 index 0000000..aba3eed --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filelock provides a platform-independent API for advisory file +// locking. Calls to functions in this package on platforms that do not support +// advisory locks will return errors for which IsNotSupported returns true. +package filelock + +import ( + "errors" + "os" +) + +// A File provides the minimal set of methods required to lock an open file. +// File implementations must be usable as map keys. +// The usual implementation is *os.File. +type File interface { + // Name returns the name of the file. + Name() string + + // Fd returns a valid file descriptor. + // (If the File is an *os.File, it must not be closed.) + Fd() uintptr + + // Stat returns the FileInfo structure describing file. + Stat() (os.FileInfo, error) +} + +// Lock places an advisory write lock on the file, blocking until it can be +// locked. +// +// If Lock returns nil, no other process will be able to place a read or write +// lock on the file until this process exits, closes f, or calls Unlock on it. +// +// If f's descriptor is already read- or write-locked, the behavior of Lock is +// unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called when Lock succeeds. +func Lock(f File) error { + return lock(f, writeLock) +} + +// RLock places an advisory read lock on the file, blocking until it can be locked. +// +// If RLock returns nil, no other process will be able to place a write lock on +// the file until this process exits, closes f, or calls Unlock on it. +// +// If f is already read- or write-locked, the behavior of RLock is unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called if RLock succeeds. 
+func RLock(f File) error { + return lock(f, readLock) +} + +// Unlock removes an advisory lock placed on f by this process. +// +// The caller must not attempt to unlock a file that is not locked. +func Unlock(f File) error { + return unlock(f) +} + +// String returns the name of the function corresponding to lt +// (Lock, RLock, or Unlock). +func (lt lockType) String() string { + switch lt { + case readLock: + return "RLock" + case writeLock: + return "Lock" + default: + return "Unlock" + } +} + +// IsNotSupported returns a boolean indicating whether the error is known to +// report that a function is not supported (possibly for a specific input). +// It is satisfied by ErrNotSupported as well as some syscall errors. +func IsNotSupported(err error) bool { + return isNotSupported(underlyingError(err)) +} + +var ErrNotSupported = errors.New("operation not supported") + +// underlyingError returns the underlying error for known os error types. +func underlyingError(err error) error { + switch err := err.(type) { + case *os.PathError: + return err.Err + case *os.LinkError: + return err.Err + case *os.SyscallError: + return err.Err + } + return err +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go new file mode 100644 index 0000000..4e64481 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. 
To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// Most platforms provide some alternative API, such as an 'flock' system call +// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and +// does not require per-inode bookkeeping in the application. +// +// TODO(golang.org/issue/35618): add a syscall.Flock binding for Illumos and +// switch it over to use filelock_unix.go. + +package filelock + +import ( + "errors" + "io" + "math/rand" + "os" + "sync" + "syscall" + "time" +) + +type lockType int16 + +const ( + readLock lockType = syscall.F_RDLCK + writeLock lockType = syscall.F_WRLCK +) + +type inode = uint64 // type of syscall.Stat_t.Ino + +type inodeLock struct { + owner File + queue []<-chan File +} + +type token struct{} + +var ( + mu sync.Mutex + inodes = map[File]inode{} + locks = map[inode]inodeLock{} +) + +func lock(f File, lt lockType) (err error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. + fi, err := f.Stat() + if err != nil { + return err + } + ino := fi.Sys().(*syscall.Stat_t).Ino + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + inodes[f] = ino + + var wait chan File + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else { + // Already owned: add a channel to wait on. 
+ wait = make(chan File) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at + // the process, rather than thread, level. Consider processes P and Q, with + // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be + // reported as a deadlock on systems that consider only process granularity: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 blocks on file B. (This is erroneously reported as a deadlock.) + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 unblocks and locks file B. + // P.2 unlocks file B. + // + // These spurious errors were observed in practice on AIX and Solaris in + // cmd/go: see https://golang.org/issue/32817. + // + // We work around this bug by treating EDEADLK as always spurious. If there + // really is a lock-ordering bug between the interacting processes, it will + // become a livelock instead, but that's not appreciably worse than if we had + // a proper flock implementation (which generally does not even attempt to + // diagnose deadlocks). + // + // In the above example, that changes the trace to: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 spuriously fails to lock file B and goes to sleep. + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 wakes up and locks file B. + // P.2 unlocks file B. + // + // We know that the retry loop will not introduce a *spurious* livelock + // because, according to the POSIX specification, EDEADLK is only to be + // returned when “the lock is blocked by a lock from another process”. 
+ // If that process is blocked on some lock that we are holding, then the + // resulting livelock is due to a real deadlock (and would manifest as such + // when using, for example, the flock implementation of this package). + // If the other process is *not* blocked on some other lock that we are + // holding, then it will eventually release the requested lock. + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + err = setlkw(f.Fd(), lt) + if err != syscall.EDEADLK { + break + } + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions when we finally unblock. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } + + if err != nil { + unlock(f) + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + + return nil +} + +func unlock(f File) error { + var owner File + + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner != f { + panic("unlock called on a file that is not locked") + } + + err := setlkw(f.Fd(), syscall.F_UNLCK) + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd. +func setlkw(fd uintptr, lt lockType) error { + for { + err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. 
+ }) + if err != syscall.EINTR { + return err + } + } +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go new file mode 100644 index 0000000..cfc5338 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_other.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !plan9 && !solaris && !windows +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!plan9,!solaris,!windows + +package filelock + +import "os" + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &os.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_plan9.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_plan9.go new file mode 100644 index 0000000..5ae3cc2 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_plan9.go @@ -0,0 +1,39 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build plan9 +// +build plan9 + +package filelock + +import ( + "os" +) + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &os.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go new file mode 100644 index 0000000..09549ef --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_unix.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd + +package filelock + +import ( + "os" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.LOCK_SH + writeLock lockType = syscall.LOCK_EX +) + +func lock(f File, lt lockType) (err error) { + for { + err = syscall.Flock(int(f.Fd()), int(lt)) + if err != syscall.EINTR { + break + } + } + if err != nil { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + return lock(f, syscall.LOCK_UN) +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go new file mode 100644 index 
0000000..2bd3eb9 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/internal/filelock/filelock_windows.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package filelock + +import ( + "os" + "syscall" + + "github.com/rogpeppe/go-internal/internal/syscall/windows" +) + +type lockType uint32 + +const ( + readLock lockType = 0 + writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +func lock(f File, lt lockType) error { + // Per https://golang.org/issue/19098, “Programs currently expect the Fd + // method to return a handle that uses ordinary synchronous I/O.” + // However, LockFileEx still requires an OVERLAPPED structure, + // which contains the file offset of the beginning of the lock range. + // We want to lock the entire file, so we leave the offset as zero. 
+ ol := new(syscall.Overlapped) + + err := windows.LockFileEx(syscall.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol) + if err != nil { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + ol := new(syscall.Overlapped) + err := windows.UnlockFileEx(syscall.Handle(f.Fd()), reserved, allBytes, allBytes, ol) + if err != nil { + return &os.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: err, + } + } + return nil +} + +func isNotSupported(err error) bool { + switch err { + case windows.ERROR_NOT_SUPPORTED, windows.ERROR_CALL_NOT_IMPLEMENTED, ErrNotSupported: + return true + default: + return false + } +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go new file mode 100644 index 0000000..bb184b1 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile.go @@ -0,0 +1,122 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lockedfile creates and manipulates files whose contents should only +// change atomically. +package lockedfile + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "runtime" +) + +// A File is a locked *os.File. +// +// Closing the file releases the lock. +// +// If the program exits while a file is locked, the operating system releases +// the lock but may not do so promptly: callers must ensure that all locked +// files are closed before exiting. +type File struct { + osFile + closed bool +} + +// osFile embeds a *os.File while keeping the pointer itself unexported. +// (When we close a File, it must be the same file descriptor that we opened!) +type osFile struct { + *os.File +} + +// OpenFile is like os.OpenFile, but returns a locked file. 
+// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; +// otherwise, it is read-locked. +func OpenFile(name string, flag int, perm os.FileMode) (*File, error) { + var ( + f = new(File) + err error + ) + f.osFile.File, err = openFile(name, flag, perm) + if err != nil { + return nil, err + } + + // Although the operating system will drop locks for open files when the go + // command exits, we want to hold locks for as little time as possible, and we + // especially don't want to leave a file locked after we're done with it. Our + // Close method is what releases the locks, so use a finalizer to report + // missing Close calls on a best-effort basis. + runtime.SetFinalizer(f, func(f *File) { + panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) + }) + + return f, nil +} + +// Open is like os.Open, but returns a read-locked file. +func Open(name string) (*File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create is like os.Create, but returns a write-locked file. +func Create(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +// Edit creates the named file with mode 0666 (before umask), +// but does not truncate existing contents. +// +// If Edit succeeds, methods on the returned File can be used for I/O. +// The associated file descriptor has mode O_RDWR and the file is write-locked. +func Edit(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) +} + +// Close unlocks and closes the underlying file. +// +// Close may be called multiple times; all calls after the first will return a +// non-nil error. +func (f *File) Close() error { + if f.closed { + return &os.PathError{ + Op: "close", + Path: f.Name(), + Err: os.ErrClosed, + } + } + f.closed = true + + err := closeFile(f.osFile.File) + runtime.SetFinalizer(f, nil) + return err +} + +// Read opens the named file with a read-lock and returns its contents. 
+func Read(name string) ([]byte, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + return ioutil.ReadAll(f) +} + +// Write opens the named file (creating it with the given permissions if needed), +// then write-locks it and overwrites it with the given content. +func Write(name string, content io.Reader, perm os.FileMode) (err error) { + f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(f, content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go new file mode 100644 index 0000000..6a03173 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_filelock.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 +// +build !plan9 + +package lockedfile + +import ( + "os" + + "github.com/rogpeppe/go-internal/lockedfile/internal/filelock" +) + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile + // call instead of locking separately, but we have to support separate locking + // calls for Linux and Windows anyway, so it's simpler to use that approach + // consistently. 
+ + f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) + if err != nil { + return nil, err + } + + switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { + case os.O_WRONLY, os.O_RDWR: + err = filelock.Lock(f) + default: + err = filelock.RLock(f) + } + if err != nil { + f.Close() + return nil, err + } + + if flag&os.O_TRUNC == os.O_TRUNC { + if err := f.Truncate(0); err != nil { + // The documentation for os.O_TRUNC says “if possible, truncate file when + // opened”, but doesn't define “possible” (golang.org/issue/28699). + // We'll treat regular files (and symlinks to regular files) as “possible” + // and ignore errors for the rest. + if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { + filelock.Unlock(f) + f.Close() + return nil, err + } + } + } + + return f, nil +} + +func closeFile(f *os.File) error { + // Since locking syscalls operate on file descriptors, we must unlock the file + // while the descriptor is still valid — that is, before the file is closed — + // and avoid unlocking files that are already closed. + err := filelock.Unlock(f) + + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go new file mode 100644 index 0000000..02221c5 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/lockedfile_plan9.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 +// +build plan9 + +package lockedfile + +import ( + "math/rand" + "os" + "strings" + "time" +) + +// Opening an exclusive-use file returns an error. 
+// The expected error strings are: +// +// - "open/create -- file is locked" (cwfs, kfs) +// - "exclusive lock" (fossil) +// - "exclusive use file already open" (ramfs) +var lockedErrStrings = [...]string{ + "file is locked", + "exclusive lock", + "exclusive use file already open", +} + +// Even though plan9 doesn't support the Lock/RLock/Unlock functions to +// manipulate already-open files, IsLocked is still meaningful: os.OpenFile +// itself may return errors that indicate that a file with the ModeExclusive bit +// set is already open. +func isLocked(err error) bool { + s := err.Error() + + for _, frag := range lockedErrStrings { + if strings.Contains(s, frag) { + return true + } + } + + return false +} + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. + // + // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open + // for I/O by only one fid at a time across all clients of the server. If a + // second open is attempted, it draws an error.” + // + // So we can try to open a locked file, but if it fails we're on our own to + // figure out when it becomes available. We'll use exponential backoff with + // some jitter and an arbitrary limit of 500ms. + + // If the file was unpacked or created by some other program, it might not + // have the ModeExclusive bit set. Set it before we call OpenFile, so that we + // can be confident that a successful OpenFile implies exclusive use. 
+ if fi, err := os.Stat(name); err == nil { + if fi.Mode()&os.ModeExclusive == 0 { + if err := os.Chmod(name, fi.Mode()|os.ModeExclusive); err != nil { + return nil, err + } + } + } else if !os.IsNotExist(err) { + return nil, err + } + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + f, err := os.OpenFile(name, flag, perm|os.ModeExclusive) + if err == nil { + return f, nil + } + + if !isLocked(err) { + return nil, err + } + + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } +} + +func closeFile(f *os.File) error { + return f.Close() +} diff --git a/vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go b/vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go new file mode 100644 index 0000000..17f3751 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/lockedfile/mutex.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lockedfile + +import ( + "fmt" + "os" +) + +// A Mutex provides mutual exclusion within and across processes by locking a +// well-known file. Such a file generally guards some other part of the +// filesystem: for example, a Mutex file in a directory might guard access to +// the entire tree rooted in that directory. +// +// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex +// can fail to lock (e.g. if there is a permission error in the filesystem). +// +// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but +// must not be copied after first use. The Path field must be set before first +// use and must not be change thereafter. +type Mutex struct { + Path string // The path to the well-known lock file. 
Must be non-empty. +} + +// MutexAt returns a new Mutex with Path set to the given non-empty path. +func MutexAt(path string) *Mutex { + if path == "" { + panic("lockedfile.MutexAt: path must be non-empty") + } + return &Mutex{Path: path} +} + +func (mu *Mutex) String() string { + return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path) +} + +// Lock attempts to lock the Mutex. +// +// If successful, Lock returns a non-nil unlock function: it is provided as a +// return-value instead of a separate method to remind the caller to check the +// accompanying error. (See https://golang.org/issue/20803.) +func (mu *Mutex) Lock() (unlock func(), err error) { + if mu.Path == "" { + panic("lockedfile.Mutex: missing Path during Lock") + } + + // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the + // file at mu.Path is write-only, the call to OpenFile will fail with a + // permission error. That's actually what we want: if we add an RLock method + // in the future, it should call OpenFile with O_RDONLY and will require the + // files must be readable, so we should not let the caller make any + // assumptions about Mutex working with write-only files. 
+ f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + return func() { f.Close() }, nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore new file mode 100644 index 0000000..fb22c99 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore @@ -0,0 +1,4 @@ +.vscode +.idea +*.swp +jv diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE new file mode 100644 index 0000000..19dc35b --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
\ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md new file mode 100644 index 0000000..75d1b53 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md @@ -0,0 +1,208 @@ +# jsonschema v5.0.0 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov.io](https://codecov.io/github/santhosh-tekuri/jsonschema/coverage.svg?branch=master)](https://codecov.io/github/santhosh-tekuri/jsonschema?branch=master) + +Package jsonschema provides json-schema compilation and validation. 
+ +### Features: + - implements + [draft 2020-12](https://json-schema.org/specification-links.html#2020-12), + [draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8), + [draft-7](https://json-schema.org/specification-links.html#draft-7), + [draft-6](https://json-schema.org/specification-links.html#draft-6), + [draft-4](https://json-schema.org/specification-links.html#draft-4) + - fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional) + - list of optional tests that are excluded can be found in schema_test.go(variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L30)) + - validates schemas against meta-schema + - full support of remote references + - support of recursive references between schemas + - detects infinite loop in schemas + - thread safe validation + - rich, intutive hierarchial error messages with json-pointers to exact location + - supports output formats flag, basic and detailed + - supports enabling format and content Assertions in draft2019-09 or above + - change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true` + - compiled schema can be introspected. 
easier to develop tools like generating go structs given schema + - supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension) + - implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat)) + - date-time, date, time, duration (supports leap-second) + - uuid, hostname, email + - ip-address, ipv4, ipv6 + - uri, uriref, uri-template(limited validation) + - json-pointer, relative-json-pointer + - regex, format + - implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) + - base64 + - implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) + - application/json + - can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (suports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader)) + + +see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) + +The schema is compiled against the version specified in `$schema` property. +If `$schema` property is missing, it uses latest draft which currently is draft7. +You can force to use specific version, when `$schema` is missing, as follows: + +```go +compiler := jsonschema.NewCompiler() +compler.Draft = jsonschema.Draft4 +``` + +you can also validate go value using `schema.ValidateInterface(interface{})` method. +but the argument should not be user-defined struct. + +This package supports loading json-schema from filePath and fileURL. 
+ +To load json-schema from HTTPURL, add following import: + +```go +import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" +``` + +## Rich Errors + +The ValidationError returned by Validate method contains detailed context to understand why and where the error is. + +schema.json: +```json +{ + "$ref": "t.json#/definitions/employee" +} +``` + +t.json: +```json +{ + "definitions": { + "employee": { + "type": "string" + } + } +} +``` + +doc.json: +```json +1 +``` + +assuming `err` is the ValidationError returned when `doc.json` validated with `schema.json`, +```go +fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy +``` +Prints: +``` +[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json# + [I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee' + [I#] [S#/definitions/employee/type] expected string, but got number +``` + +Here `I` stands for instance document and `S` stands for schema document. +The json-fragments that caused error in instance and schema documents are represented using json-pointer notation. +Nested causes are printed with indent. 
+ +To output `err` in `flag` output format: +```go +b, _ := json.MarshalIndent(err.FlagOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false +} +``` +To output `err` in `basic` output format: +```go +b, _ := json.MarshalIndent(err.BasicOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false, + "errors": [ + { + "keywordLocation": "", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", + "instanceLocation": "", + "error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#" + }, + { + "keywordLocation": "/$ref", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", + "instanceLocation": "", + "error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'" + }, + { + "keywordLocation": "/$ref/type", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", + "instanceLocation": "", + "error": "expected string, but got number" + } + ] +} +``` +To output `err` in `detailed` output format: +```go +b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false, + "keywordLocation": "", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", + "instanceLocation": "", + "errors": [ + { + "valid": false, + "keywordLocation": "/$ref", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", + "instanceLocation": "", + "errors": [ + { + "valid": false, + "keywordLocation": "/$ref/type", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", + "instanceLocation": "", + "error": "expected string, but got number" + } + ] + } + ] +} +``` + +## CLI + +```bash +jv [-draft INT] [-output FORMAT] []... + -draft int + draft used when '$schema' attribute is missing. 
valid values 4, 5, 7, 2019, 2020 (default 2020) + -output string + output format. valid values flag, basic, detailed +``` + +if no `` arguments are passed, it simply validates the ``. +if `$schema` attribute is missing in schema, it uses latest version. this can be overriden by passing `-draft` flag + +exit-code is 1, if there are any validation errors + +## Validating YAML Document + +since yaml supports non-string keys, such yaml documents are rendered as invalid json documents. +yaml parser returns `map[interface{}]interface{}` for object, whereas json parser returns `map[string]interafce{}`. +this package accepts only `map[string]interface{}`, so we need to manually convert them to `map[string]interface{}` + +https://play.golang.org/p/voSN4i0u973 + +the above example shows how to validate yaml document with jsonschema. +the convertion explained above is implemented by `toStringKeys` function + diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go new file mode 100644 index 0000000..738f772 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go @@ -0,0 +1,696 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "io" + "math/big" + "regexp" + "strconv" + "strings" +) + +// A Compiler represents a json-schema compiler. +type Compiler struct { + // Draft represents the draft used when '$schema' attribute is missing. + // + // This defaults to latest draft (currently draft2019-09). + Draft *Draft + resources map[string]*resource + + // Extensions is used to register extensions. + extensions map[string]extension + + // ExtractAnnotations tells whether schema annotations has to be extracted + // in compiled Schema or not. + ExtractAnnotations bool + + // LoadURL loads the document at given absolute URL. + // + // If nil, package global LoadURL is used. + LoadURL func(s string) (io.ReadCloser, error) + + // AssertFormat for specifications >= draft2019-09. 
+ AssertFormat bool + + // AssertContent for specifications >= draft2019-09. + AssertContent bool +} + +// Compile parses json-schema at given url returns, if successful, +// a Schema object that can be used to match against json. +// +// Returned error can be *SchemaError +func Compile(url string) (*Schema, error) { + return NewCompiler().Compile(url) +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. +func MustCompile(url string) *Schema { + return NewCompiler().MustCompile(url) +} + +// CompileString parses and compiles the given schema with given base url. +func CompileString(url, schema string) (*Schema, error) { + c := NewCompiler() + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + return nil, err + } + return c.Compile(url) +} + +// MustCompileString is like CompileString but panics on error. +// It simplified safe initialization of global variables holding compiled Schema. +func MustCompileString(url, schema string) *Schema { + c := NewCompiler() + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + panic(err) + } + return c.MustCompile(url) +} + +// NewCompiler returns a json-schema Compiler object. +// if '$schema' attribute is missing, it is treated as draft7. to change this +// behavior change Compiler.Draft value +func NewCompiler() *Compiler { + return &Compiler{Draft: latest, resources: make(map[string]*resource), extensions: make(map[string]extension)} +} + +// AddResource adds in-memory resource to the compiler. +// +// Note that url must not have fragment +func (c *Compiler) AddResource(url string, r io.Reader) error { + res, err := newResource(url, r) + if err != nil { + return err + } + c.resources[res.url] = res + return nil +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. 
+// It simplifies safe initialization of global variables holding compiled Schemas. +func (c *Compiler) MustCompile(url string) *Schema { + s, err := c.Compile(url) + if err != nil { + panic(fmt.Sprintf("jsonschema: %#v", err)) + } + return s +} + +// Compile parses json-schema at given url returns, if successful, +// a Schema object that can be used to match against json. +// +// error returned will be of type *SchemaError +func (c *Compiler) Compile(url string) (*Schema, error) { + // make url absolute + u, err := toAbs(url) + if err != nil { + return nil, &SchemaError{url, err} + } + url = u + + sch, err := c.compileURL(url, nil, "#") + if err != nil { + err = &SchemaError{url, err} + } + return sch, err +} + +func (c *Compiler) findResource(url string) (*resource, error) { + if _, ok := c.resources[url]; !ok { + // load resource + loadURL := LoadURL + if c.LoadURL != nil { + loadURL = c.LoadURL + } + rdr, err := loadURL(url) + if err != nil { + return nil, err + } + defer rdr.Close() + if err := c.AddResource(url, rdr); err != nil { + return nil, err + } + } + + r := c.resources[url] + if r.draft != nil { + return r, nil + } + + // set draft + r.draft = c.Draft + if m, ok := r.doc.(map[string]interface{}); ok { + if sch, ok := m["$schema"]; ok { + if _, ok = sch.(string); !ok { + return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) + } + r.draft = findDraft(sch.(string)) + if r.draft == nil { + return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) + } + } + } + + id, err := r.draft.resolveID(r.url, r.doc) + if err != nil { + return nil, err + } + if id != "" { + r.url = id + } + + if err := r.fillSubschemas(c, r); err != nil { + return nil, err + } + + return r, nil +} + +func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) { + // if url points to a draft, return Draft.meta + if d := findDraft(url); d != nil && d.meta != nil { + return d.meta, nil + } + + b, f := split(url) + r, err := 
c.findResource(b) + if err != nil { + return nil, err + } + return c.compileRef(r, stack, ptr, r, f) +} + +func (c *Compiler) compileRef(r *resource, stack []schemaRef, refPtr string, res *resource, ref string) (*Schema, error) { + base := r.baseURL(res.floc) + ref, err := resolveURL(base, ref) + if err != nil { + return nil, err + } + + u, f := split(ref) + sr := r.findResource(u) + if sr == nil { + // external resource + return c.compileURL(ref, stack, refPtr) + } + sr, err = r.resolveFragment(c, sr, f) + if err != nil { + return nil, err + } + if sr == nil { + return nil, fmt.Errorf("jsonschema: %s not found", ref) + } + + if sr.schema != nil { + if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil { + return nil, err + } + return sr.schema, nil + } + + sr.schema = newSchema(r.url, sr.floc, sr.doc) + return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr) +} + +func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error { + if r.draft.version < 2020 { + return nil + } + + rr := r.listResources(res) + rr = append(rr, res) + for _, sr := range rr { + if m, ok := sr.doc.(map[string]interface{}); ok { + if _, ok := m["$dynamicAnchor"]; ok { + sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc) + if err != nil { + return err + } + res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch) + } + } + } + return nil +} + +func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) { + if err := c.compileDynamicAnchors(r, res); err != nil { + return nil, err + } + + switch v := res.doc.(type) { + case bool: + res.schema.Always = &v + return res.schema, nil + default: + return res.schema, c.compileMap(r, stack, sref, res) + } +} + +func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error { + m := res.doc.(map[string]interface{}) + + if err := checkLoop(stack, sref); err != nil { + return err + } + stack = append(stack, 
sref) + + var s = res.schema + var err error + + if ref, ok := m["$ref"]; ok { + s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string)) + if err != nil { + return err + } + if r.draft.version < 2019 { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if r.draft.version >= 2019 { + if ref, ok := m["$recursiveRef"]; ok { + s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string)) + if err != nil { + return err + } + } + } + if r.draft.version >= 2020 { + if dref, ok := m["$dynamicRef"]; ok { + s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string)) + if err != nil { + return err + } + } + } + + if t, ok := m["type"]; ok { + switch t := t.(type) { + case string: + s.Types = []string{t} + case []interface{}: + s.Types = toStrings(t) + } + } + + if e, ok := m["enum"]; ok { + s.Enum = e.([]interface{}) + allPrimitives := true + for _, item := range s.Enum { + switch jsonType(item) { + case "object", "array": + allPrimitives = false + break + } + } + s.enumError = "enum failed" + if allPrimitives { + if len(s.Enum) == 1 { + s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) + } else { + strEnum := make([]string, len(s.Enum)) + for i, item := range s.Enum { + strEnum[i] = fmt.Sprintf("%#v", item) + } + s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) + } + } + } + + compile := func(stack []schemaRef, ptr string) (*Schema, error) { + return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr) + } + + loadSchema := func(pname string, stack []schemaRef) (*Schema, error) { + if _, ok := m[pname]; ok { + return compile(stack, escape(pname)) + } + return nil, nil + } + + if s.Not, err = loadSchema("not", stack); err != nil { + return err + } + + loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) { + if pvalue, ok := m[pname]; ok { + pvalue := pvalue.([]interface{}) + schemas := make([]*Schema, len(pvalue)) + for i := range 
pvalue { + sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i)) + if err != nil { + return nil, err + } + schemas[i] = sch + } + return schemas, nil + } + return nil, nil + } + if s.AllOf, err = loadSchemas("allOf", stack); err != nil { + return err + } + if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil { + return err + } + if s.OneOf, err = loadSchemas("oneOf", stack); err != nil { + return err + } + + loadInt := func(pname string) int { + if num, ok := m[pname]; ok { + i, _ := num.(json.Number).Int64() + return int(i) + } + return -1 + } + s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") + + if req, ok := m["required"]; ok { + s.Required = toStrings(req.([]interface{})) + } + + if props, ok := m["properties"]; ok { + props := props.(map[string]interface{}) + s.Properties = make(map[string]*Schema, len(props)) + for pname := range props { + s.Properties[pname], err = compile(nil, "properties/"+escape(pname)) + if err != nil { + return err + } + } + } + + if regexProps, ok := m["regexProperties"]; ok { + s.RegexProperties = regexProps.(bool) + } + + if patternProps, ok := m["patternProperties"]; ok { + patternProps := patternProps.(map[string]interface{}) + s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) + for pattern := range patternProps { + s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern)) + if err != nil { + return err + } + } + } + + if additionalProps, ok := m["additionalProperties"]; ok { + switch additionalProps := additionalProps.(type) { + case bool: + s.AdditionalProperties = additionalProps + case map[string]interface{}: + s.AdditionalProperties, err = compile(nil, "additionalProperties") + if err != nil { + return err + } + } + } + + if deps, ok := m["dependencies"]; ok { + deps := deps.(map[string]interface{}) + s.Dependencies = make(map[string]interface{}, len(deps)) + for pname, pvalue := range deps { + switch 
pvalue := pvalue.(type) { + case []interface{}: + s.Dependencies[pname] = toStrings(pvalue) + default: + s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname)) + if err != nil { + return err + } + } + } + } + + if r.draft.version >= 2019 { + if deps, ok := m["dependentRequired"]; ok { + deps := deps.(map[string]interface{}) + s.DependentRequired = make(map[string][]string, len(deps)) + for pname, pvalue := range deps { + s.DependentRequired[pname] = toStrings(pvalue.([]interface{})) + } + } + if deps, ok := m["dependentSchemas"]; ok { + deps := deps.(map[string]interface{}) + s.DependentSchemas = make(map[string]*Schema, len(deps)) + for pname := range deps { + s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname)) + if err != nil { + return err + } + } + } + if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil { + return err + } + if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil { + return err + } + } + + s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") + + if unique, ok := m["uniqueItems"]; ok { + s.UniqueItems = unique.(bool) + } + + if r.draft.version >= 2020 { + if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil { + return err + } + if s.Items2020, err = loadSchema("items", nil); err != nil { + return err + } + } else { + if items, ok := m["items"]; ok { + switch items.(type) { + case []interface{}: + s.Items, err = loadSchemas("items", nil) + if err != nil { + return err + } + if additionalItems, ok := m["additionalItems"]; ok { + switch additionalItems := additionalItems.(type) { + case bool: + s.AdditionalItems = additionalItems + case map[string]interface{}: + s.AdditionalItems, err = compile(nil, "additionalItems") + if err != nil { + return err + } + } + } + default: + s.Items, err = compile(nil, "items") + if err != nil { + return err + } + } + } + } + + s.MinLength, s.MaxLength = loadInt("minLength"), 
loadInt("maxLength") + + if pattern, ok := m["pattern"]; ok { + s.Pattern = regexp.MustCompile(pattern.(string)) + } + + if format, ok := m["format"]; ok { + s.Format = format.(string) + s.format, _ = Formats[s.Format] + } + + loadRat := func(pname string) *big.Rat { + if num, ok := m[pname]; ok { + r, _ := new(big.Rat).SetString(string(num.(json.Number))) + return r + } + return nil + } + + s.Minimum = loadRat("minimum") + if exclusive, ok := m["exclusiveMinimum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Minimum, s.ExclusiveMinimum = nil, s.Minimum + } + } else { + s.ExclusiveMinimum = loadRat("exclusiveMinimum") + } + } + + s.Maximum = loadRat("maximum") + if exclusive, ok := m["exclusiveMaximum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Maximum, s.ExclusiveMaximum = nil, s.Maximum + } + } else { + s.ExclusiveMaximum = loadRat("exclusiveMaximum") + } + } + + s.MultipleOf = loadRat("multipleOf") + + if c.ExtractAnnotations { + if title, ok := m["title"]; ok { + s.Title = title.(string) + } + if description, ok := m["description"]; ok { + s.Description = description.(string) + } + s.Default = m["default"] + } + + if r.draft.version >= 6 { + if c, ok := m["const"]; ok { + s.Constant = []interface{}{c} + } + if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil { + return err + } + if s.Contains, err = loadSchema("contains", nil); err != nil { + return err + } + if r.draft.version >= 2020 { + // any item in an array that passes validation of the contains schema is considered "evaluated" + s.ContainsEval = true + } + s.MinContains, s.MaxContains = 1, -1 + } + + if r.draft.version >= 7 { + if m["if"] != nil { + if s.If, err = loadSchema("if", stack); err != nil { + return err + } + if s.Then, err = loadSchema("then", stack); err != nil { + return err + } + if s.Else, err = loadSchema("else", stack); err != nil { + return err + } + } + if encoding, ok := m["contentEncoding"]; ok { + 
s.ContentEncoding = encoding.(string) + s.decoder, _ = Decoders[s.ContentEncoding] + } + if mediaType, ok := m["contentMediaType"]; ok { + s.ContentMediaType = mediaType.(string) + s.mediaType, _ = MediaTypes[s.ContentMediaType] + } + if c.ExtractAnnotations { + if comment, ok := m["$comment"]; ok { + s.Comment = comment.(string) + } + if readOnly, ok := m["readOnly"]; ok { + s.ReadOnly = readOnly.(bool) + } + if writeOnly, ok := m["writeOnly"]; ok { + s.WriteOnly = writeOnly.(bool) + } + if examples, ok := m["examples"]; ok { + s.Examples = examples.([]interface{}) + } + } + } + + if r.draft.version >= 2019 { + s.decoder = nil + s.mediaType = nil + if !c.AssertFormat { + s.format = nil + } + + s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains") + if s.MinContains == -1 { + s.MinContains = 1 + } + + if c.ExtractAnnotations { + if deprecated, ok := m["deprecated"]; ok { + s.Deprecated = deprecated.(bool) + } + } + } + + for name, ext := range c.extensions { + es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m) + if err != nil { + return err + } + if es != nil { + if s.Extensions == nil { + s.Extensions = make(map[string]ExtSchema) + } + s.Extensions[name] = es + } + } + + return nil +} + +func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error { + validate := func(meta *Schema) error { + if meta == nil { + return nil + } + return meta.validateValue(v, vloc) + } + + if err := validate(r.draft.meta); err != nil { + return err + } + for _, ext := range c.extensions { + if err := validate(ext.meta); err != nil { + return err + } + } + return nil +} + +func toStrings(arr []interface{}) []string { + s := make([]string, len(arr)) + for i, v := range arr { + s[i] = v.(string) + } + return s +} + +// SchemaRef captures schema and the path refering to it. 
+type schemaRef struct { + path string // relative-json-pointer to schema + schema *Schema // target schema + discard bool // true when scope left +} + +func (sr schemaRef) String() string { + return fmt.Sprintf("(%s)%v", sr.path, sr.schema) +} + +func checkLoop(stack []schemaRef, sref schemaRef) error { + for _, ref := range stack { + if ref.schema == sref.schema { + return infiniteLoopError(stack, sref) + } + } + return nil +} + +func keywordLocation(stack []schemaRef, path string) string { + var loc string + for _, ref := range stack[1:] { + loc += "/" + ref.path + } + if path != "" { + loc = loc + "/" + path + } + return loc +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go new file mode 100644 index 0000000..7570b8b --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go @@ -0,0 +1,29 @@ +package jsonschema + +import ( + "encoding/base64" + "encoding/json" +) + +// Decoders is a registry of functions, which know how to decode +// string encoded in specific format. +// +// New Decoders can be registered by adding to this map. Key is encoding name, +// value is function that knows how to decode string in that format. +var Decoders = map[string]func(string) ([]byte, error){ + "base64": base64.StdEncoding.DecodeString, +} + +// MediaTypes is a registry of functions, which know how to validate +// whether the bytes represent data of that mediaType. +// +// New mediaTypes can be registered by adding to this map. Key is mediaType name, +// value is function that knows how to validate that mediaType. 
+var MediaTypes = map[string]func([]byte) error{ + "application/json": validateJSON, +} + +func validateJSON(b []byte) error { + var v interface{} + return json.Unmarshal(b, &v) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go new file mode 100644 index 0000000..f9a083f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go @@ -0,0 +1,52 @@ +/* +Package jsonschema provides json-schema compilation and validation. + +Features: + - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4 + - fully compliant with JSON-Schema-Test-Suite, (excluding some optional) + - list of optional tests that are excluded can be found in schema_test.go(variable skipTests) + - validates schemas against meta-schema + - full support of remote references + - support of recursive references between schemas + - detects infinite loop in schemas + - thread safe validation + - rich, intutive hierarchial error messages with json-pointers to exact location + - supports output formats flag, basic and detailed + - supports enabling format and content Assertions in draft2019-09 or above + - change Compiler.AssertFormat, Compiler.AssertContent to true + - compiled schema can be introspected. easier to develop tools like generating go structs given schema + - supports user-defined keywords via extensions + - implements following formats (supports user-defined) + - date-time, date, time, duration (supports leap-second) + - uuid, hostname, email + - ip-address, ipv4, ipv6 + - uri, uriref, uri-template(limited validation) + - json-pointer, relative-json-pointer + - regex, format + - implements following contentEncoding (supports user-defined) + - base64 + - implements following contentMediaType (supports user-defined) + - application/json + - can load from files/http/https/string/[]byte/io.Reader (suports user-defined) + +The schema is compiled against the version specified in "$schema" property. 
+If "$schema" property is missing, it uses latest draft which currently implemented +by this library. + +You can force to use specific draft, when "$schema" is missing, as follows: + + compiler := jsonschema.NewCompiler() + compler.Draft = jsonschema.Draft4 + +you can also validate go value using schema.ValidateInterface(interface{}) method. +but the argument should not be user-defined struct. + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add following import: + + import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" + +you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA +*/ +package jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go new file mode 100644 index 0000000..e847165 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go @@ -0,0 +1,1373 @@ +package jsonschema + +import ( + "strconv" + "strings" +) + +// A Draft represents json-schema draft +type Draft struct { + version int + meta *Schema + id string // property name used to represent schema id. 
+ boolSchema bool // is boolean valid schema + subschemas map[string]position +} + +func (d *Draft) loadMeta(base string, schemas map[string]string) { + c := NewCompiler() + c.AssertFormat = true + for u, schema := range schemas { + if err := c.AddResource(base+"/"+u, strings.NewReader(schema)); err != nil { + panic(err) + } + } + d.meta = c.MustCompile(base + "/schema") +} + +func (d *Draft) getID(sch interface{}) string { + m, ok := sch.(map[string]interface{}) + if !ok { + return "" + } + v, ok := m[d.id] + if !ok { + return "" + } + id, ok := v.(string) + if !ok { + return "" + } + return id +} + +func (d *Draft) resolveID(base string, sch interface{}) (string, error) { + id, _ := split(d.getID(sch)) // strip fragment + if id == "" { + return "", nil + } + url, err := resolveURL(base, id) + url, _ = split(url) // strip fragment + return url, err +} + +func (d *Draft) anchors(sch interface{}) []string { + m, ok := sch.(map[string]interface{}) + if !ok { + return nil + } + + var anchors []string + + // before draft2019, anchor is specified in id + _, f := split(d.getID(m)) + if f != "#" { + anchors = append(anchors, f[1:]) + } + + if v, ok := m["$anchor"]; ok && d.version >= 2019 { + anchors = append(anchors, v.(string)) + } + if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 { + anchors = append(anchors, v.(string)) + } + return anchors +} + +// listSubschemas collects subschemas in r into rr. 
+func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error { + add := func(loc string, sch interface{}) error { + url, err := d.resolveID(base, sch) + if err != nil { + return err + } + floc := r.floc + "/" + loc + sr := &resource{url: url, floc: floc, doc: sch} + rr[floc] = sr + + base := base + if url != "" { + base = url + } + return d.listSubschemas(sr, base, rr) + } + + sch, ok := r.doc.(map[string]interface{}) + if !ok { + return nil + } + for kw, pos := range d.subschemas { + v, ok := sch[kw] + if !ok { + continue + } + if pos&self != 0 { + switch v := v.(type) { + case map[string]interface{}: + if err := add(kw, v); err != nil { + return err + } + case bool: + if d.boolSchema { + if err := add(kw, v); err != nil { + return err + } + } + } + } + if pos&item != 0 { + if v, ok := v.([]interface{}); ok { + for i, item := range v { + if err := add(kw+"/"+strconv.Itoa(i), item); err != nil { + return err + } + } + } + } + if pos&prop != 0 { + if v, ok := v.(map[string]interface{}); ok { + for pname, pval := range v { + if err := add(kw+"/"+escape(pname), pval); err != nil { + return err + } + } + } + } + } + return nil +} + +type position uint + +const ( + self position = 1 << iota + prop + item +) + +// supported drafts +var ( + Draft4 = &Draft{version: 4, id: "id", boolSchema: false} + Draft6 = &Draft{version: 6, id: "$id", boolSchema: true} + Draft7 = &Draft{version: 7, id: "$id", boolSchema: true} + Draft2019 = &Draft{version: 2019, id: "$id", boolSchema: true} + Draft2020 = &Draft{version: 2020, id: "$id", boolSchema: true} + + latest = Draft2020 +) + +func findDraft(url string) *Draft { + if strings.HasPrefix(url, "http://") { + url = "https://" + strings.TrimPrefix(url, "http://") + } + if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") { + url = url[:strings.IndexByte(url, '#')] + } + switch url { + case "https://json-schema.org/schema": + return latest + case "https://json-schema.org/draft/2020-12/schema": + 
return Draft2020 + case "https://json-schema.org/draft/2019-09/schema": + return Draft2019 + case "https://json-schema.org/draft-07/schema": + return Draft7 + case "https://json-schema.org/draft-06/schema": + return Draft6 + case "https://json-schema.org/draft-04/schema": + return Draft4 + } + return nil +} + +func init() { + subschemas := map[string]position{ + // type agnostic + "definitions": prop, + "not": self, + "allOf": item, + "anyOf": item, + "oneOf": item, + // object + "properties": prop, + "additionalProperties": self, + "patternProperties": prop, + // array + "items": self | item, + "additionalItems": self, + "dependencies": prop, + } + Draft4.subschemas = clone(subschemas) + + subschemas["propertyNames"] = self + subschemas["contains"] = self + Draft6.subschemas = clone(subschemas) + + subschemas["if"] = self + subschemas["then"] = self + subschemas["else"] = self + Draft7.subschemas = clone(subschemas) + + subschemas["$defs"] = prop + subschemas["dependentSchemas"] = prop + subschemas["unevaluatedProperties"] = self + subschemas["unevaluatedItems"] = self + Draft2019.subschemas = clone(subschemas) + + subschemas["prefixItems"] = item + Draft2020.subschemas = clone(subschemas) + + Draft4.loadMeta("http://json-schema.org/draft-04", map[string]string{ + "schema": `{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + 
}, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "regexProperties": { "type": "boolean" }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + 
{ "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} + }`, + }) + Draft6.loadMeta("http://json-schema.org/draft-06", map[string]string{ + "schema": `{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": 
"#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} + }`, + }) + Draft7.loadMeta("http://json-schema.org/draft-07", map[string]string{ + "schema": `{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core 
schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + 
"minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true + }`, + }) + Draft2019.loadMeta("https://json-schema.org/draft/2019-09", map[string]string{ + "schema": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + 
"https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } + }`, + "meta/core": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + 
}, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } + }`, + "meta/applicator": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } + }`, + "meta/validation": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + 
"$recursiveAnchor": true, + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } + }`, + "meta/meta-data": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + 
"$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } + }`, + "meta/format": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "meta/content": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } + }`, + }) + Draft2020.loadMeta("https://json-schema.org/draft/2020-12", map[string]string{ + "schema": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + 
"https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } + }`, + "meta/core": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + 
"properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } + }`, + "meta/applicator": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": 
"#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } + }`, + "meta/unevaluated": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } + }`, + "meta/validation": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": 
"#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } + }`, + "meta/meta-data": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } + }`, + "meta/format-annotation": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary 
meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "meta/content": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } + }`, + }) +} + +func clone(m map[string]position) map[string]position { + mm := make(map[string]position) + for k, v := range m { + mm[k] = v + } + return mm +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go new file mode 100644 index 0000000..5592dce --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go @@ -0,0 +1,214 @@ +package jsonschema + +import ( + "bytes" + "fmt" + "strings" + "unicode/utf8" +) + +// InvalidJSONTypeError is the error type returned by ValidateInterface. +// this tells that specified go object is not valid jsonType. +type InvalidJSONTypeError string + +func (e InvalidJSONTypeError) Error() string { + return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e)) +} + +// InfiniteLoopError is returned by Compile/Validate. +// this gives url#keywordLocation that lead to infinity loop. 
+type InfiniteLoopError string + +func (e InfiniteLoopError) Error() string { + return "jsonschema: infinite loop " + string(e) +} + +func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError { + var path string + for _, ref := range stack { + if path == "" { + path += ref.schema.Location + } else { + path += "/" + ref.path + } + } + return InfiniteLoopError(path + "/" + sref.path) +} + +// SchemaError is the error type returned by Compile. +type SchemaError struct { + // SchemaURL is the url to json-schema that filed to compile. + // This is helpful, if your schema refers to external schemas + SchemaURL string + + // Err is the error that occurred during compilation. + // It could be ValidationError, because compilation validates + // given schema against the json meta-schema + Err error +} + +func (se *SchemaError) Unwrap() error { + return se.Err +} + +func (se *SchemaError) Error() string { + s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL) + if se.Err != nil { + return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: ")) + } + return s +} + +func (se *SchemaError) GoString() string { + if _, ok := se.Err.(*ValidationError); ok { + return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err) + } + return se.Error() +} + +// ValidationError is the error type returned by Validate. 
+type ValidationError struct { + KeywordLocation string // validation path of validating keyword or schema + AbsoluteKeywordLocation string // absolute location of validating keyword or schema + InstanceLocation string // location of the json value within the instance being validated + Message string // describes error + Causes []*ValidationError // nested validation errors +} + +func (ve *ValidationError) add(causes ...error) error { + for _, cause := range causes { + ve.Causes = append(ve.Causes, cause.(*ValidationError)) + } + return ve +} + +func (ve *ValidationError) causes(err error) error { + if err := err.(*ValidationError); err.Message == "" { + ve.Causes = err.Causes + } else { + ve.add(err) + } + return ve +} + +func (ve *ValidationError) leaf() *ValidationError { + if strings.HasSuffix(ve.KeywordLocation, "/anyOf") || strings.HasSuffix(ve.KeywordLocation, "/oneOf") { + if len(ve.Causes) == 1 { + return ve.Causes[0].leaf() + } + return ve + } + if len(ve.Causes) > 0 { + return ve.Causes[0].leaf() + } + return ve +} + +func (ve *ValidationError) Error() string { + err := ve.leaf() + u, _ := split(ve.AbsoluteKeywordLocation) + return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(err.InstanceLocation), u+"#"+err.KeywordLocation, err.Message) +} + +func (ve *ValidationError) GoString() string { + sloc := ve.AbsoluteKeywordLocation + sloc = sloc[strings.IndexByte(sloc, '#')+1:] + msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message) + for _, c := range ve.Causes { + for _, line := range strings.Split(c.GoString(), "\n") { + msg += "\n " + line + } + } + return msg +} + +func joinPtr(ptr1, ptr2 string) string { + if len(ptr1) == 0 { + return ptr2 + } + if len(ptr2) == 0 { + return ptr1 + } + return ptr1 + "/" + ptr2 +} + +func quote(s string) string { + var w = bytes.NewBuffer(make([]byte, 0, len(s)+10)) + w.WriteByte('\'') + start := 0 + for i := 0; i < len(s); { + b := s[i] + if b < 0x20 { + if start < i { + 
w.WriteString(s[start:i]) + } + switch b { + case '\n': + w.WriteString(`\n`) + case '\r': + w.WriteString(`\r`) + case '\f': + w.WriteString(`\f`) + case '\t': + w.WriteString(`\t`) + default: + w.WriteString(`\u00`) + w.Write(hex(b >> 4)) + w.Write(hex(b & 0xF)) + } + i++ + start = i + continue + } + if b < utf8.RuneSelf { + if b == '\\' || b == '"' { + if start < i { + w.WriteString(s[start:i]) + } + switch b { + case '\\': + w.WriteString(`\\`) + case '\'': + w.WriteString(`\'`) + } + i++ + start = i + continue + } + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + if start < i { + w.WriteString(s[start:i]) + } + w.WriteString(`\ufffd`) + i += size + start = i + continue + } + if r == '\u2028' || r == '\u2029' { + if start < i { + w.WriteString(s[start:i]) + } + w.WriteString(`\u202`) + w.Write(hex(uint8(r & 0xF))) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.WriteString(s[start:]) + } + w.WriteByte('\'') + return w.String() +} + +var hexBytes = []byte("0123456789abcdef") + +func hex(i uint8) []byte { + return hexBytes[i : i+1] +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go new file mode 100644 index 0000000..452ba11 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go @@ -0,0 +1,116 @@ +package jsonschema + +// ExtCompiler compiles custom keyword(s) into ExtSchema. +type ExtCompiler interface { + // Compile compiles the custom keywords in schema m and returns its compiled representation. + // if the schema m does not contain the keywords defined by this extension, + // compiled representation nil should be returned. + Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error) +} + +// ExtSchema is schema representation of custom keyword(s) +type ExtSchema interface { + // Validate validates the json value v with this ExtSchema. 
+ // Returned error must be *ValidationError. + Validate(ctx ValidationContext, v interface{}) error +} + +type extension struct { + meta *Schema + compiler ExtCompiler +} + +// RegisterExtension registers custom keyword(s) into this compiler. +// +// name is extension name, used only to avoid name collisions. +// meta captures the metaschema for the new keywords. +// This is used to validate the schema before calling ext.Compile. +func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) { + c.extensions[name] = extension{meta, ext} +} + +// CompilerContext --- + +// CompilerContext provides additional context required in compiling for extension. +type CompilerContext struct { + c *Compiler + r *resource + stack []schemaRef + res *resource +} + +// Compile compiles given value at ptr into *Schema. This is useful in implementing +// keyword like allOf/not/patternProperties. +// +// schPath is the relative-json-pointer to the schema to be compiled from parent schema. +// +// applicableOnSameInstance tells whether current schema and the given schema +// are applied on same instance value. this is used to detect infinite loop in schema. +func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) { + var stack []schemaRef + if applicableOnSameInstance { + stack = ctx.stack + } + return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath) +} + +// CompileRef compiles the schema referenced by ref uri +// +// refPath is the relative-json-pointer to ref. +// +// applicableOnSameInstance tells whether current schema and the given schema +// are applied on same instance value. this is used to detect infinite loop in schema. 
func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) {
	var stack []schemaRef
	if applicableOnSameInstance {
		// share the caller's stack so self-referencing schemas are detected as loops
		stack = ctx.stack
	}
	return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref)
}

// ValidationContext ---

// ValidationContext provides additional context required in validating for extension.
type ValidationContext struct {
	result          validationResult
	validate        func(sch *Schema, schPath string, v interface{}, vpath string) error
	validateInplace func(sch *Schema, schPath string) error
	validationError func(keywordPath string, format string, a ...interface{}) *ValidationError
}

// EvaluatedProp marks given property of object as evaluated.
func (ctx ValidationContext) EvaluatedProp(prop string) {
	delete(ctx.result.unevalProps, prop)
}

// EvaluatedItem marks given index of array as evaluated.
func (ctx ValidationContext) EvaluatedItem(index int) {
	delete(ctx.result.unevalItems, index)
}

// Validate validates schema s with value v. Extension must use this method instead of
// *Schema.ValidateInterface method. This will be useful in implementing keywords like
// allOf/oneOf
//
// spath is relative-json-pointer to s
// vpath is relative-json-pointer to v.
func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error {
	if vpath == "" {
		// same instance value: validate in place so unevaluated* bookkeeping is shared
		return ctx.validateInplace(s, spath)
	}
	return ctx.validate(s, spath, v, vpath)
}

// Error used to construct validation error by extensions.
//
// keywordPath is relative-json-pointer to keyword.
func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError {
	return ctx.validationError(keywordPath, format, a...)
}

// Group is used by extensions to group multiple errors as causes to parent error.
// This is useful in implementing keywords like allOf where each schema specified
// in allOf can result a validationError.
func (ValidationError) Group(parent *ValidationError, causes ...error) error {
	return parent.add(causes...)
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go
new file mode 100644
index 0000000..053b01e
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go
@@ -0,0 +1,537 @@
package jsonschema

import (
	"errors"
	"net"
	"net/mail"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"time"
)

// Formats is a registry of functions, which know how to validate
// a specific format.
//
// New Formats can be registered by adding to this map. Key is format name,
// value is function that knows how to validate that format.
var Formats = map[string]func(interface{}) bool{
	"date-time":             isDateTime,
	"date":                  isDate,
	"time":                  isTime,
	"duration":              isDuration,
	"hostname":              isHostname,
	"email":                 isEmail,
	"ip-address":            isIPV4,
	"ipv4":                  isIPV4,
	"ipv6":                  isIPV6,
	"uri":                   isURI,
	"iri":                   isURI,
	"uri-reference":         isURIReference,
	"uriref":                isURIReference,
	"iri-reference":         isURIReference,
	"uri-template":          isURITemplate,
	"regex":                 isRegex,
	"json-pointer":          isJSONPointer,
	"relative-json-pointer": isRelativeJSONPointer,
	"uuid":                  isUUID,
}

// isDateTime tells whether given string is a valid date representation
// as defined by RFC 3339, section 5.6.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isDateTime(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// format keywords ignore values of other json types
		return true
	}
	if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ
		return false
	}
	if s[10] != 'T' && s[10] != 't' {
		return false
	}
	// delegate to the date and time validators for the two halves
	return isDate(s[:10]) && isTime(s[11:])
}

// isDate tells whether given string is a valid full-date production
// as defined by RFC 3339, section 5.6.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isDate(v interface{}) bool {
	// non-string values are not this format's concern
	if s, ok := v.(string); ok {
		_, err := time.Parse("2006-01-02", s)
		return err == nil
	}
	return true
}

// isTime tells whether given string is a valid full-time production
// as defined by RFC 3339, section 5.6.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isTime(v interface{}) bool {
	rest, ok := v.(string)
	if !ok {
		return true
	}

	// the time package rejects leap seconds, so the grammar is checked by hand

	// "hh:mm:ss" plus at least one byte of secfrac/offset
	// 01234567
	if len(rest) < 9 || rest[2] != ':' || rest[5] != ':' {
		return false
	}
	num := func(digits string, min, max int) (int, bool) {
		n, err := strconv.Atoi(digits)
		if err != nil || n < min || n > max {
			return 0, false
		}
		return n, true
	}
	h, hok := num(rest[0:2], 0, 23)
	m, mok := num(rest[3:5], 0, 59)
	s, sok := num(rest[6:8], 0, 60)
	if !hok || !mok || !sok {
		return false
	}
	rest = rest[8:]

	// optional "." followed by one or more digits
	if rest[0] == '.' {
		i := 1
		for i < len(rest) && rest[i] >= '0' && rest[i] <= '9' {
			i++
		}
		if i == 1 {
			return false // dot with no digits
		}
		rest = rest[i:]
	}

	if rest == "" {
		return false // a zone offset is mandatory
	}

	switch {
	case rest == "z" || rest == "Z":
		// UTC, nothing to adjust
	case rest[0] == 'z' || rest[0] == 'Z':
		return false // trailing junk after Z
	default:
		// time-numoffset: "+hh:mm" / "-hh:mm"
		// 012345
		if len(rest) != 6 || rest[3] != ':' {
			return false
		}
		var sign int
		switch rest[0] {
		case '+':
			sign = -1 // subtract the offset to get back to UTC
		case '-':
			sign = +1
		default:
			return false
		}
		zh, zhok := num(rest[1:3], 0, 23)
		zm, zmok := num(rest[4:6], 0, 59)
		if !zhok || !zmok {
			return false
		}
		// apply timezone offset
		hm := (h*60 + m) + sign*(zh*60+zm)
		if hm < 0 {
			hm += 24 * 60
		}
		h, m = hm/60, hm%60
	}

	// a leap second is only legal at 23:59:60 UTC
	return s != 60 || (h == 23 && m == 59)
}

// isDuration tells whether given string is a valid duration format
// from the ISO 8601 ABNF as given in Appendix A of RFC 3339.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details
func isDuration(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	if s == "" || s[0] != 'P' {
		return false
	}
	s = s[1:]

	// scanUnits consumes "<digits><designator>" pairs from s until the end
	// or a 'T', returning the designator letters in order of appearance.
	scanUnits := func() (string, bool) {
		var designators string
		for s != "" && s[0] != 'T' {
			i := 0
			for i < len(s) && s[i] >= '0' && s[i] <= '9' {
				i++
			}
			if i == 0 || i == len(s) {
				// a designator must follow at least one digit
				return designators, false
			}
			designators += string(s[i])
			s = s[i+1:]
		}
		return designators, true
	}

	units, ok := scanUnits()
	if !ok {
		return false
	}
	if units == "W" {
		return s == "" // week form "PnW" admits nothing else
	}
	if units != "" {
		// date designators must form a contiguous substring of "YMD".
		// NOTE(review): substring matching also rejects sets like Y+D
		// without M ("P1Y2D"); confirm whether that strictness is intended.
		if !strings.Contains("YMD", units) {
			return false
		}
		if s == "" {
			return true // date-only duration
		}
	}
	if s == "" || s[0] != 'T' {
		return false
	}
	s = s[1:]
	units, ok = scanUnits()
	// time part: everything consumed, at least one designator, substring of "HMS"
	return ok && s == "" && units != "" && strings.Contains("HMS", units)
}

// isHostname tells whether given string is a valid representation
// for an Internet host name, as defined by RFC 1034 section 3.1 and
// RFC 1123 section 2.1.
//
// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details.
func isHostname(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// format keywords ignore values of other json types
		return true
	}
	// entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters
	s = strings.TrimSuffix(s, ".")
	if len(s) > 253 {
		return false
	}

	// Hostnames are composed of series of labels concatenated with dots, as are all domain names
	for _, label := range strings.Split(s, ".") {
		// Each label must be from 1 to 63 characters long
		if labelLen := len(label); labelLen < 1 || labelLen > 63 {
			return false
		}

		// labels must not start with a hyphen
		// RFC 1123 section 2.1: restriction on the first character
		// is relaxed to allow either a letter or a digit
		//
		// BUG FIX: this previously inspected s[0] (the first character of the
		// whole hostname), so inner labels starting with '-' (e.g. "a.-b.com")
		// were wrongly accepted. Check the label itself, symmetric with the
		// trailing-hyphen check below.
		if first := label[0]; first == '-' {
			return false
		}

		// must not end with a hyphen
		if label[len(label)-1] == '-' {
			return false
		}

		// labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner),
		// the digits '0' through '9', and the hyphen ('-')
		for _, c := range label {
			if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid {
				return false
			}
		}
	}

	return true
}

// isEmail tells whether given string is a valid Internet email address
// as defined by RFC 5322, section 3.4.1.
//
// See https://en.wikipedia.org/wiki/Email_address, for details.
func isEmail(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	// entire email address to be no more than 254 characters long
	if len(s) > 254 {
		return false
	}

	// email address is generally recognized as having two parts joined with an at-sign
	at := strings.LastIndexByte(s, '@')
	if at == -1 {
		return false
	}
	local := s[0:at]
	domain := s[at+1:]

	// local part may be up to 64 characters long
	if len(local) > 64 {
		return false
	}

	// domain must match the requirements for a hostname
	if !isHostname(domain) {
		return false
	}

	// final syntax check is delegated to the stdlib address parser
	_, err := mail.ParseAddress(s)
	return err == nil
}

// isIPV4 tells whether given string is a valid representation of an IPv4 address
// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2.
func isIPV4(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	groups := strings.Split(s, ".")
	if len(groups) != 4 {
		return false
	}
	for _, group := range groups {
		n, err := strconv.Atoi(group)
		if err != nil {
			return false
		}
		if n < 0 || n > 255 {
			return false
		}
		if n != 0 && group[0] == '0' {
			// leading zeroes should be rejected, as they are treated as octals
			// NOTE(review): "00" slips past this check (n == 0); confirm
			// whether that leniency is intended.
			return false
		}
	}
	return true
}

// isIPV6 tells whether given string is a valid representation of an IPv6 address
// as defined in RFC 2373, section 2.2.
func isIPV6(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	// require at least one colon so dotted-quad IPv4 is not accepted here
	if !strings.Contains(s, ":") {
		return false
	}
	return net.ParseIP(s) != nil
}

// isURI tells whether given string is valid URI, according to RFC 3986.
func isURI(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	u, err := urlParse(s)
	return err == nil && u.IsAbs()
}

// urlParse wraps url.Parse and additionally validates that an IPv6 host
// is bracketed and well-formed.
func urlParse(s string) (*url.URL, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}

	// if hostname is ipv6, validate it
	hostname := u.Hostname()
	if strings.IndexByte(hostname, ':') != -1 {
		if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 {
			return nil, errors.New("ipv6 address is not enclosed in brackets")
		}
		if !isIPV6(hostname) {
			return nil, errors.New("invalid ipv6 address")
		}
	}
	return u, nil
}

// isURIReference tells whether given string is a valid URI Reference
// (either a URI or a relative-reference), according to RFC 3986.
func isURIReference(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	_, err := urlParse(s)
	// backslashes are never legal in a URI reference
	return err == nil && !strings.Contains(s, `\`)
}

// isURITemplate tells whether given string is a valid URI Template
// according to RFC6570.
//
// Current implementation does minimal validation.
func isURITemplate(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	u, err := urlParse(s)
	if err != nil {
		return false
	}
	// check that braces in each path segment are balanced and not nested
	// NOTE(review): this inspects u.RawPath, which url.Parse only populates
	// when escaping differs from u.Path — confirm templates in plain paths
	// are meant to be checked.
	for _, item := range strings.Split(u.RawPath, "/") {
		depth := 0
		for _, ch := range item {
			switch ch {
			case '{':
				depth++
				if depth != 1 {
					return false
				}
			case '}':
				depth--
				if depth != 0 {
					return false
				}
			}
		}
		if depth != 0 {
			return false
		}
	}
	return true
}

// isRegex tells whether given string is a valid regular expression,
// according to the ECMA 262 regular expression dialect.
//
// The implementation uses go-lang regexp package.
func isRegex(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	_, err := regexp.Compile(s)
	return err == nil
}

// isJSONPointer tells whether given string is a valid JSON Pointer.
//
// Note: It returns false for JSON Pointer URI fragments.
func isJSONPointer(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	// non-empty pointers must begin with '/'
	if s != "" && s[0] != '/' {
		return false
	}
	for _, token := range strings.Split(s, "/") {
		for i := 0; i < len(token); i++ {
			if token[i] != '~' {
				continue
			}
			if i == len(token)-1 {
				return false // dangling '~' at end of token
			}
			// NOTE(review): accepting '~' after '~' admits "~~", which RFC 6901
			// does not define as an escape; confirm the leniency is intended.
			switch token[i+1] {
			case '~', '0', '1':
				// valid escape
			default:
				return false
			}
		}
	}
	return true
}

// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer.
//
// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
func isRelativeJSONPointer(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		return true
	}
	if s == "" {
		return false
	}
	// leading non-negative integer: a lone "0", or "1"-"9" followed by digits
	switch {
	case s[0] == '0':
		s = s[1:] // "01" must not parse as 1
	case s[0] >= '1' && s[0] <= '9':
		for s != "" && s[0] >= '0' && s[0] <= '9' {
			s = s[1:]
		}
	default:
		return false
	}
	// remainder is either the "#" index-of marker or a json-pointer
	return s == "#" || isJSONPointer(s)
}

// isUUID tells whether given string is a valid uuid format
// as specified in RFC4122.
//
// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details
func isUUID(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// format keywords ignore values of other json types
		return true
	}
	// parseHex consumes exactly n hex digits from the front of s
	parseHex := func(n int) bool {
		for n > 0 {
			if len(s) == 0 {
				return false
			}
			hex := (s[0] >= '0' && s[0] <= '9') || (s[0] >= 'a' && s[0] <= 'f') || (s[0] >= 'A' && s[0] <= 'F')
			if !hex {
				return false
			}
			s = s[1:]
			n--
		}
		return true
	}
	// 8-4-4-4-12 hex digit groups separated by '-'
	groups := []int{8, 4, 4, 4, 12}
	for i, numDigits := range groups {
		if !parseHex(numDigits) {
			return false
		}
		if i == len(groups)-1 {
			// nothing may follow the final group
			return len(s) == 0
		}
		if len(s) == 0 || s[0] != '-' {
			return false
		}
		s = s[1:]
	}
	return true
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/go.mod b/vendor/github.com/santhosh-tekuri/jsonschema/v5/go.mod
new file mode 100644
index 0000000..9d00fda
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/go.mod
@@ -0,0 +1,3 @@
module github.com/santhosh-tekuri/jsonschema/v5

go 1.15
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
new file mode 100644
index 0000000..c94195c
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
@@ -0,0 +1,60 @@
package jsonschema

import (
	"fmt"
	"io"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

// loadFileURL opens the local file referenced by a file:// url.
func loadFileURL(s string) (io.ReadCloser, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	f := u.Path
	if runtime.GOOS == "windows" {
		// strip the leading slash of "/C:/..." and convert to backslashes
		f = strings.TrimPrefix(f, "/")
		f = filepath.FromSlash(f)
	}
	return os.Open(f)
}

// Loaders is a registry of functions, which know how to load
// absolute url of specific schema.
//
// New loaders can be registered by adding to this map. Key is url scheme,
// value is function that knows how to load url of that scheme
var Loaders = map[string]func(url string) (io.ReadCloser, error){
	"file": loadFileURL,
}

// LoaderNotFoundError is the error type returned by Load function.
// It tells that no Loader is registered for that URL Scheme.
type LoaderNotFoundError string

func (e LoaderNotFoundError) Error() string {
	return fmt.Sprintf("jsonschema: no Loader found for %s", string(e))
}

// LoadURL loads document at given absolute URL. The default implementation
// uses Loaders registry to lookup by scheme and uses that loader.
//
// Users can change this variable, if they would like to take complete
// responsibility of loading given URL. Used by Compiler if its LoadURL
// field is nil.
var LoadURL = func(s string) (io.ReadCloser, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	loader, ok := Loaders[u.Scheme]
	if !ok {
		return nil, LoaderNotFoundError(s)
	}
	return loader(s)
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
new file mode 100644
index 0000000..7ce4fcd
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
@@ -0,0 +1,77 @@
package jsonschema

// Flag is output format with simple boolean property valid.
type Flag struct {
	Valid bool `json:"valid"`
}

// FlagOutput returns output in flag format
// (Valid is always false: the receiver represents a validation failure).
func (ve *ValidationError) FlagOutput() Flag {
	return Flag{}
}

// Basic ---

// Basic is output format with flat list of output units.
type Basic struct {
	Valid  bool         `json:"valid"`
	Errors []BasicError `json:"errors"`
}

// BasicError is output unit in basic format.
type BasicError struct {
	KeywordLocation         string `json:"keywordLocation"`
	AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"`
	InstanceLocation        string `json:"instanceLocation"`
	Error                   string `json:"error"`
}

// BasicOutput returns output in basic format
// (a depth-first flattening of the error tree; Valid is always false).
func (ve *ValidationError) BasicOutput() Basic {
	var errors []BasicError
	var flatten func(*ValidationError)
	flatten = func(ve *ValidationError) {
		errors = append(errors, BasicError{
			KeywordLocation:         ve.KeywordLocation,
			AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
			InstanceLocation:        ve.InstanceLocation,
			Error:                   ve.Message,
		})
		for _, cause := range ve.Causes {
			flatten(cause)
		}
	}
	flatten(ve)
	return Basic{Errors: errors}
}

// Detailed ---

// Detailed is output format based on structure of schema.
type Detailed struct {
	Valid                   bool       `json:"valid"`
	KeywordLocation         string     `json:"keywordLocation"`
	AbsoluteKeywordLocation string     `json:"absoluteKeywordLocation"`
	InstanceLocation        string     `json:"instanceLocation"`
	Error                   string     `json:"error,omitempty"`
	Errors                  []Detailed `json:"errors,omitempty"`
}

// DetailedOutput returns output in detailed format
func (ve *ValidationError) DetailedOutput() Detailed {
	var errors []Detailed
	for _, cause := range ve.Causes {
		errors = append(errors, cause.DetailedOutput())
	}
	var message = ve.Message
	if len(ve.Causes) > 0 {
		// interior nodes report only their children, not their own message
		message = ""
	}
	return Detailed{
		KeywordLocation:         ve.KeywordLocation,
		AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
		InstanceLocation:        ve.InstanceLocation,
		Error:                   message,
		Errors:                  errors,
	}
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
new file mode 100644
index 0000000..ee9d38c
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
@@ -0,0 +1,272 @@
package jsonschema

import (
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
)

// resource is a schema document (root or embedded) addressable by url and/or
// by json-pointer location within its root.
type resource struct {
	url          string // base url of resource. can be empty
	floc         string // fragment with json-pointer from root resource
	doc          interface{}
	draft        *Draft
	subresources map[string]*resource // key is floc. only applicable for root resource
	schema       *Schema
}

func (r *resource) String() string {
	return r.url + r.floc
}

// newResource decodes r into a root resource with absolute url.
// url must not contain a fragment.
func newResource(url string, r io.Reader) (*resource, error) {
	if strings.IndexByte(url, '#') != -1 {
		panic(fmt.Sprintf("BUG: newResource(%q)", url))
	}
	doc, err := unmarshal(r)
	if err != nil {
		return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err)
	}
	url, err = toAbs(url)
	if err != nil {
		return nil, err
	}
	return &resource{
		url:  url,
		floc: "#",
		doc:  doc,
	}, nil
}

// fillSubschemas fills subschemas in res into r.subresources
func (r *resource) fillSubschemas(c *Compiler, res *resource) error {
	if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil {
		return err
	}

	if r.subresources == nil {
		r.subresources = make(map[string]*resource)
	}
	if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil {
		return err
	}

	// ensure subresource.url uniqueness
	url2floc := make(map[string]string)
	for _, sr := range r.subresources {
		if sr.url != "" {
			if floc, ok := url2floc[sr.url]; ok {
				return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url)
			}
			url2floc[sr.url] = sr.floc
		}
	}

	return nil
}

// listResources lists all subresources in res
func (r *resource) listResources(res *resource) []*resource {
	var result []*resource
	prefix := res.floc + "/"
	for _, sr := range r.subresources {
		if strings.HasPrefix(sr.floc, prefix) {
			result = append(result, sr)
		}
	}
	return result
}

// findResource returns the resource (root or sub) with the given canonical url.
func (r *resource) findResource(url string) *resource {
	if r.url == url {
		return r
	}
	for _, res := range r.subresources {
		if res.url == url {
			return res
		}
	}
	return nil
}

// resolve fragment f with sr as base
func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) {
	if f == "#" || f == "#/" {
		return sr, nil
	}

	// resolve by anchor
	if !strings.HasPrefix(f, "#/") {
		// check in given resource
		for _, anchor := range r.draft.anchors(sr.doc) {
			if anchor == f[1:] {
				return sr, nil
			}
		}

		// check in subresources that has same base url
		prefix := sr.floc + "/"
		for _, res := range r.subresources {
			if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url {
				for _, anchor := range r.draft.anchors(res.doc) {
					if anchor == f[1:] {
						return res, nil
					}
				}
			}
		}
		return nil, nil
	}

	// resolve by ptr
	floc := sr.floc + f[1:]
	if res, ok := r.subresources[floc]; ok {
		return res, nil
	}

	// non-standard location: walk the raw document by json-pointer tokens
	doc := r.doc
	for _, item := range strings.Split(floc[2:], "/") {
		// undo json-pointer escaping ("~1" -> "/", "~0" -> "~") then url escaping
		item = strings.Replace(item, "~1", "/", -1)
		item = strings.Replace(item, "~0", "~", -1)
		item, err := url.PathUnescape(item)
		if err != nil {
			return nil, err
		}
		switch d := doc.(type) {
		case map[string]interface{}:
			if _, ok := d[item]; !ok {
				return nil, nil
			}
			doc = d[item]
		case []interface{}:
			index, err := strconv.Atoi(item)
			if err != nil {
				return nil, err
			}
			if index < 0 || index >= len(d) {
				return nil, nil
			}
			doc = d[index]
		default:
			return nil, nil
		}
	}

	// register the newly discovered location as a subresource
	id, err := r.draft.resolveID(r.baseURL(floc), doc)
	if err != nil {
		return nil, err
	}
	res := &resource{url: id, floc: floc, doc: doc}
	r.subresources[floc] = res
	if err := r.fillSubschemas(c, res); err != nil {
		return nil, err
	}
	return res, nil
}

// baseURL returns the base url in effect at floc: the url of the nearest
// enclosing subresource that declares one, else the root url.
func (r *resource) baseURL(floc string) string {
	for {
		if sr, ok := r.subresources[floc]; ok {
			if sr.url != "" {
				return sr.url
			}
		}
		slash := strings.LastIndexByte(floc, '/')
		if slash == -1 {
			break
		}
		floc = floc[:slash]
	}
	return r.url
}

// url
helpers ---

// toAbs returns s as an absolute url, converting filepaths to file:// urls.
func toAbs(s string) (string, error) {
	// if windows absolute file path, convert to file url
	// because: net/url parses driver name as scheme
	if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` {
		s = "file:///" + filepath.ToSlash(s)
	}

	u, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	if u.IsAbs() {
		return s, nil
	}

	// s is filepath
	if s, err = filepath.Abs(s); err != nil {
		return "", err
	}
	if runtime.GOOS == "windows" {
		s = "file:///" + filepath.ToSlash(s)
	} else {
		s = "file://" + s
	}
	u, err = url.Parse(s) // to fix spaces in filepath
	return u.String(), err
}

// resolveURL resolves ref against base per RFC 3986 reference resolution.
func resolveURL(base, ref string) (string, error) {
	if ref == "" {
		return base, nil
	}

	refURL, err := url.Parse(ref)
	if err != nil {
		return "", err
	}
	if refURL.IsAbs() {
		return ref, nil
	}

	baseURL, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	return baseURL.ResolveReference(refURL).String(), nil
}

// split separates uri into its document url and fragment ("#" if absent).
func split(uri string) (string, string) {
	hash := strings.IndexByte(uri, '#')
	if hash == -1 {
		return uri, "#"
	}
	f := uri[hash:]
	if f == "#/" {
		f = "#"
	}
	return uri[0:hash], f
}

// url returns the document part of the schema's absolute location.
func (s *Schema) url() string {
	u, _ := split(s.Location)
	return u
}

// loc returns the fragment part of the schema's location, without the leading '#'.
func (s *Schema) loc() string {
	_, f := split(s.Location)
	return f[1:]
}

// unmarshal decodes a single json document from r, using json.Number for
// numbers, and rejects trailing content after the top-level value.
func unmarshal(r io.Reader) (interface{}, error) {
	decoder := json.NewDecoder(r)
	decoder.UseNumber()
	var doc interface{}
	if err := decoder.Decode(&doc); err != nil {
		return nil, err
	}
	if t, _ := decoder.Token(); t != nil {
		return nil, fmt.Errorf("invalid character %v after top-level value", t)
	}
	return doc, nil
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
new file mode 100644
index 0000000..ebb6a67
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
@@ -0,0 +1,793 @@
package jsonschema

import (
	"encoding/json"
	"fmt"
	"math/big"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"unicode/utf8"
)

// A Schema represents compiled version of json-schema.
type Schema struct {
	Location string // absolute location

	dynamicAnchors []*Schema

	// type agnostic validations
	Format          string
	format          func(interface{}) bool
	Always          *bool // always pass/fail. used when booleans are used as schemas in draft-07.
	Ref             *Schema
	RecursiveAnchor bool
	RecursiveRef    *Schema
	DynamicAnchor   string
	DynamicRef      *Schema
	Types           []string      // allowed types.
	Constant        []interface{} // first element in slice is constant value. note: slice is used to capture nil constant.
	Enum            []interface{} // allowed values.
	enumError       string        // error message for enum fail. captured here to avoid constructing error message every time.
	Not             *Schema
	AllOf           []*Schema
	AnyOf           []*Schema
	OneOf           []*Schema
	If              *Schema
	Then            *Schema // nil, when If is nil.
	Else            *Schema // nil, when If is nil.

	// object validations
	MinProperties         int      // -1 if not specified.
	MaxProperties         int      // -1 if not specified.
	Required              []string // list of required properties.
	Properties            map[string]*Schema
	PropertyNames         *Schema
	RegexProperties       bool // property names must be valid regex. used only in draft4 as workaround in metaschema.
	PatternProperties     map[*regexp.Regexp]*Schema
	AdditionalProperties  interface{}            // nil or bool or *Schema.
	Dependencies          map[string]interface{} // map value is *Schema or []string.
	DependentRequired     map[string][]string
	DependentSchemas      map[string]*Schema
	UnevaluatedProperties *Schema

	// array validations
	MinItems         int // -1 if not specified.
	MaxItems         int // -1 if not specified.
	UniqueItems      bool
	Items            interface{} // nil or *Schema or []*Schema
	AdditionalItems  interface{} // nil or bool or *Schema.
	PrefixItems      []*Schema
	Items2020        *Schema // items keyword reintroduced in draft 2020-12
	Contains         *Schema
	ContainsEval     bool // whether any item in an array that passes validation of the contains schema is considered "evaluated"
	MinContains      int // 1 if not specified
	MaxContains      int // -1 if not specified
	UnevaluatedItems *Schema

	// string validations
	MinLength        int // -1 if not specified.
	MaxLength        int // -1 if not specified.
	Pattern          *regexp.Regexp
	ContentEncoding  string
	decoder          func(string) ([]byte, error)
	ContentMediaType string
	mediaType        func([]byte) error

	// number validators
	Minimum          *big.Rat
	ExclusiveMinimum *big.Rat
	Maximum          *big.Rat
	ExclusiveMaximum *big.Rat
	MultipleOf       *big.Rat

	// annotations. captured only when Compiler.ExtractAnnotations is true.
	Title       string
	Description string
	Default     interface{}
	Comment     string
	ReadOnly    bool
	WriteOnly   bool
	Examples    []interface{}
	Deprecated  bool

	// user defined extensions
	Extensions map[string]ExtSchema
}

func (s *Schema) String() string {
	return s.Location
}

// newSchema returns a Schema with numeric limits defaulted to "not specified",
// capturing $recursiveAnchor/$dynamicAnchor from doc when present.
func newSchema(url, floc string, doc interface{}) *Schema {
	// fill with default values
	s := &Schema{
		Location:      url + floc,
		MinProperties: -1,
		MaxProperties: -1,
		MinItems:      -1,
		MaxItems:      -1,
		MinContains:   1,
		MaxContains:   -1,
		MinLength:     -1,
		MaxLength:     -1,
	}

	if doc, ok := doc.(map[string]interface{}); ok {
		if ra, ok := doc["$recursiveAnchor"]; ok {
			if ra, ok := ra.(bool); ok {
				s.RecursiveAnchor = ra
			}
		}
		if da, ok := doc["$dynamicAnchor"]; ok {
			if da, ok := da.(string); ok {
				s.DynamicAnchor = da
			}
		}
	}
	return s
}

// Validate validates given doc, against the json-schema s.
//
// the v must be the raw json value. for number precision
// unmarshal with json.UseNumber().
//
// returns *ValidationError if v does not conform with schema s.
// returns InfiniteLoopError if it detects loop during validation.
+// returns InvalidJSONTypeError if it detects any non json value in v. +func (s *Schema) Validate(v interface{}) (err error) { + return s.validateValue(v, "") +} + +func (s *Schema) validateValue(v interface{}, vloc string) (err error) { + defer func() { + if r := recover(); r != nil { + switch r := r.(type) { + case InfiniteLoopError, InvalidJSONTypeError: + err = r.(error) + default: + panic(r) + } + } + }() + if _, err := s.validate(nil, 0, "", v, vloc); err != nil { + ve := ValidationError{ + KeywordLocation: "", + AbsoluteKeywordLocation: s.Location, + InstanceLocation: vloc, + Message: fmt.Sprintf("doesn't validate with %s", s.Location), + } + return ve.causes(err) + } + return nil +} + +// validate validates given value v with this schema. +func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) { + validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError { + return &ValidationError{ + KeywordLocation: keywordLocation(scope, keywordPath), + AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath), + InstanceLocation: vloc, + Message: fmt.Sprintf(format, a...), + } + } + + sref := schemaRef{spath, s, false} + if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil { + panic(err) + } + scope = append(scope, sref) + vscope++ + + // populate result + switch v := v.(type) { + case map[string]interface{}: + result.unevalProps = make(map[string]struct{}) + for pname := range v { + result.unevalProps[pname] = struct{}{} + } + case []interface{}: + result.unevalItems = make(map[int]struct{}) + for i := range v { + result.unevalItems[i] = struct{}{} + } + } + + validate := func(sch *Schema, schPath string, v interface{}, vpath string) error { + vloc := vloc + if vpath != "" { + vloc += "/" + vpath + } + _, err := sch.validate(scope, 0, schPath, v, vloc) + return err + } + + validateInplace := func(sch *Schema, schPath string) error { + vr, 
err := sch.validate(scope, vscope, schPath, v, vloc) + if err == nil { + // update result + for pname := range result.unevalProps { + if _, ok := vr.unevalProps[pname]; !ok { + delete(result.unevalProps, pname) + } + } + for i := range result.unevalItems { + if _, ok := vr.unevalItems[i]; !ok { + delete(result.unevalItems, i) + } + } + } + return err + } + + if s.Always != nil { + if !*s.Always { + return result, validationError("", "not allowed") + } + return result, nil + } + + if len(s.Types) > 0 { + vType := jsonType(v) + matched := false + for _, t := range s.Types { + if vType == t { + matched = true + break + } else if t == "integer" && vType == "number" { + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + if num.IsInt() { + matched = true + break + } + } + } + if !matched { + return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) + } + } + + var errors []error + + if len(s.Constant) > 0 { + if !equals(v, s.Constant[0]) { + switch jsonType(s.Constant[0]) { + case "object", "array": + errors = append(errors, validationError("const", "const failed")) + default: + errors = append(errors, validationError("const", "value must be %#v", s.Constant[0])) + } + } + } + + if len(s.Enum) > 0 { + matched := false + for _, item := range s.Enum { + if equals(v, item) { + matched = true + break + } + } + if !matched { + errors = append(errors, validationError("enum", s.enumError)) + } + } + + if s.format != nil && !s.format(v) { + var val = v + if v, ok := v.(string); ok { + val = quote(v) + } + errors = append(errors, validationError("format", "%v is not valid %s", val, quote(s.Format))) + } + + switch v := v.(type) { + case map[string]interface{}: + if s.MinProperties != -1 && len(v) < s.MinProperties { + errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v))) + } + if s.MaxProperties != -1 && len(v) > s.MaxProperties { + errors = 
append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v))) + } + if len(s.Required) > 0 { + var missing []string + for _, pname := range s.Required { + if _, ok := v[pname]; !ok { + missing = append(missing, quote(pname)) + } + } + if len(missing) > 0 { + errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", "))) + } + } + + for pname, sch := range s.Properties { + if pvalue, ok := v[pname]; ok { + delete(result.unevalProps, pname) + if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + + if s.PropertyNames != nil { + for pname := range v { + if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + + if s.RegexProperties { + for pname := range v { + if !isRegex(pname) { + errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname))) + } + } + } + for pattern, sch := range s.PatternProperties { + for pname, pvalue := range v { + if pattern.MatchString(pname) { + delete(result.unevalProps, pname) + if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + } + if s.AdditionalProperties != nil { + if allowed, ok := s.AdditionalProperties.(bool); ok { + if !allowed && len(result.unevalProps) > 0 { + errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames())) + } + } else { + schema := s.AdditionalProperties.(*Schema) + for pname := range result.unevalProps { + if pvalue, ok := v[pname]; ok { + if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + } + result.unevalProps = nil + } + for dname, dvalue := range s.Dependencies 
{ + if _, ok := v[dname]; ok { + switch dvalue := dvalue.(type) { + case *Schema: + if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil { + errors = append(errors, err) + } + case []string: + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) + } + } + } + } + } + for dname, dvalue := range s.DependentRequired { + if _, ok := v[dname]; ok { + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) + } + } + } + } + for dname, sch := range s.DependentSchemas { + if _, ok := v[dname]; ok { + if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil { + errors = append(errors, err) + } + } + } + + case []interface{}: + if s.MinItems != -1 && len(v) < s.MinItems { + errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, len(v))) + } + if s.MaxItems != -1 && len(v) > s.MaxItems { + errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v))) + } + if s.UniqueItems { + for i := 1; i < len(v); i++ { + for j := 0; j < i; j++ { + if equals(v[i], v[j]) { + errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) + } + } + } + } + + // items + additionalItems + switch items := s.Items.(type) { + case *Schema: + for i, item := range v { + if err := validate(items, "items", item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } + result.unevalItems = nil + case []*Schema: + for i, item := range v { + if i < len(items) { + delete(result.unevalItems, i) + if err := validate(items[i], 
"items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else if sch, ok := s.AdditionalItems.(*Schema); ok { + delete(result.unevalItems, i) + if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else { + break + } + } + if additionalItems, ok := s.AdditionalItems.(bool); ok { + if additionalItems { + result.unevalItems = nil + } else if len(v) > len(items) { + errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v))) + } + } + } + + // prefixItems + items + for i, item := range v { + if i < len(s.PrefixItems) { + delete(result.unevalItems, i) + if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else if s.Items2020 != nil { + delete(result.unevalItems, i) + if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else { + break + } + } + + // contains + minContains + maxContains + if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) { + matched := 0 + var causes []error + for i, item := range v { + if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil { + causes = append(causes, err) + } else { + matched++ + if s.ContainsEval { + delete(result.unevalItems, i) + } + } + } + if s.MinContains != -1 && matched < s.MinContains { + errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...)) + } + if s.MaxContains != -1 && matched > s.MaxContains { + errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched)) + } + } + + case string: + // minLength + maxLength + if s.MinLength != -1 || s.MaxLength != -1 { + length := utf8.RuneCount([]byte(v)) + if s.MinLength != -1 && length < 
s.MinLength { + errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length)) + } + if s.MaxLength != -1 && length > s.MaxLength { + errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, length)) + } + } + + if s.Pattern != nil && !s.Pattern.MatchString(v) { + errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String()))) + } + + // contentEncoding + contentMediaType + if s.decoder != nil || s.mediaType != nil { + decoded := s.ContentEncoding == "" + var content []byte + if s.decoder != nil { + b, err := s.decoder(v) + if err != nil { + errors = append(errors, validationError("contentEncoding", "%s is not %s encoded", quote(v), s.ContentEncoding)) + } else { + content, decoded = b, true + } + } + if decoded && s.mediaType != nil { + if s.decoder == nil { + content = []byte(v) + } + if err := s.mediaType(content); err != nil { + errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType))) + } + } + } + + case json.Number, float64, int, int32, int64: + // lazy convert to *big.Rat to avoid allocation + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprint(v)) + } + return numVal + } + f64 := func(r *big.Rat) float64 { + f, _ := r.Float64() + return f + } + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v)) + } + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v)) + } + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v)) + } + if s.ExclusiveMaximum != nil && 
num().Cmp(s.ExclusiveMaximum) >= 0 { + errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v)) + } + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf))) + } + } + } + + // $ref + $recursiveRef + $dynamicRef + validateRef := func(sch *Schema, refPath string) error { + if sch != nil { + if err := validateInplace(sch, refPath); err != nil { + var url = sch.Location + if s.url() == sch.url() { + url = sch.loc() + } + return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err) + } + } + return nil + } + if err := validateRef(s.Ref, "$ref"); err != nil { + errors = append(errors, err) + } + if s.RecursiveRef != nil { + sch := s.RecursiveRef + if sch.RecursiveAnchor { + // recursiveRef based on scope + for _, e := range scope { + if e.schema.RecursiveAnchor { + sch = e.schema + break + } + } + } + if err := validateRef(sch, "$recursiveRef"); err != nil { + errors = append(errors, err) + } + } + if s.DynamicRef != nil { + sch := s.DynamicRef + if sch.DynamicAnchor != "" { + // dynamicRef based on scope + for i := len(scope) - 1; i >= 0; i-- { + sr := scope[i] + if sr.discard { + break + } + for _, da := range sr.schema.dynamicAnchors { + if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != s.DynamicRef { + sch = da + break + } + } + } + } + if err := validateRef(sch, "$dynamicRef"); err != nil { + errors = append(errors, err) + } + } + + if s.Not != nil && validateInplace(s.Not, "not") == nil { + errors = append(errors, validationError("not", "not failed")) + } + + for i, sch := range s.AllOf { + schPath := "allOf/" + strconv.Itoa(i) + if err := validateInplace(sch, schPath); err != nil { + errors = append(errors, validationError(schPath, "allOf failed").add(err)) + } + } + + if len(s.AnyOf) > 0 { + matched := false + var causes []error + for i, 
sch := range s.AnyOf { + if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil { + matched = true + } else { + causes = append(causes, err) + } + } + if !matched { + errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...)) + } + } + + if len(s.OneOf) > 0 { + matched := -1 + var causes []error + for i, sch := range s.OneOf { + if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil { + if matched == -1 { + matched = i + } else { + errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i)) + break + } + } else { + causes = append(causes, err) + } + } + if matched == -1 { + errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...)) + } + } + + // if + then + else + if s.If != nil { + err := validateInplace(s.If, "if") + // "if" leaves dynamic scope + scope[len(scope)-1].discard = true + if err == nil { + if s.Then != nil { + if err := validateInplace(s.Then, "then"); err != nil { + errors = append(errors, validationError("then", "if-then failed").add(err)) + } + } + } else { + if s.Else != nil { + if err := validateInplace(s.Else, "else"); err != nil { + errors = append(errors, validationError("else", "if-else failed").add(err)) + } + } + } + // restore dynamic scope + scope[len(scope)-1].discard = false + } + + for _, ext := range s.Extensions { + if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil { + errors = append(errors, err) + } + } + + // UnevaluatedProperties + UnevaluatedItems + switch v := v.(type) { + case map[string]interface{}: + if s.UnevaluatedProperties != nil { + for pname := range result.unevalProps { + if pvalue, ok := v[pname]; ok { + if err := validate(s.UnevaluatedProperties, "UnevaluatedProperties", pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + result.unevalProps = nil + } + case []interface{}: + if s.UnevaluatedItems != nil { + for 
i := range result.unevalItems { + if err := validate(s.UnevaluatedItems, "UnevaluatedItems", v[i], strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } + result.unevalItems = nil + } + } + + switch len(errors) { + case 0: + return result, nil + case 1: + return result, errors[0] + default: + return result, validationError("", "").add(errors...) // empty message, used just for wrapping + } +} + +type validationResult struct { + unevalProps map[string]struct{} + unevalItems map[int]struct{} +} + +func (vr validationResult) unevalPnames() string { + pnames := make([]string, 0, len(vr.unevalProps)) + for pname := range vr.unevalProps { + pnames = append(pnames, quote(pname)) + } + return strings.Join(pnames, ", ") +} + +// jsonType returns the json type of given value v. +// +// It panics if the given value is not valid json value +func jsonType(v interface{}) string { + switch v.(type) { + case nil: + return "null" + case bool: + return "boolean" + case json.Number, float64, int, int32, int64: + return "number" + case string: + return "string" + case []interface{}: + return "array" + case map[string]interface{}: + return "object" + } + panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) +} + +// equals tells if given two json values are equal or not. 
+func equals(v1, v2 interface{}) bool { + v1Type := jsonType(v1) + if v1Type != jsonType(v2) { + return false + } + switch v1Type { + case "array": + arr1, arr2 := v1.([]interface{}), v2.([]interface{}) + if len(arr1) != len(arr2) { + return false + } + for i := range arr1 { + if !equals(arr1[i], arr2[i]) { + return false + } + } + return true + case "object": + obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{}) + if len(obj1) != len(obj2) { + return false + } + for k, v1 := range obj1 { + if v2, ok := obj2[k]; ok { + if !equals(v1, v2) { + return false + } + } else { + return false + } + } + return true + case "number": + num1, _ := new(big.Rat).SetString(fmt.Sprint(v1)) + num2, _ := new(big.Rat).SetString(fmt.Sprint(v2)) + return num1.Cmp(num2) == 0 + default: + return v1 == v2 + } +} + +// escape converts given token to valid json-pointer token +func escape(token string) string { + token = strings.Replace(token, "~", "~0", -1) + token = strings.Replace(token, "/", "~1", -1) + return url.PathEscape(token) +} diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go new file mode 100644 index 0000000..fd3eecc --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -0,0 +1,109 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux +// +build linux + +package unix + +import ( + "bytes" + "unsafe" +) + +// Helpers for dealing with ifreq since it contains a union and thus requires a +// lot of unsafe.Pointer casts to use properly. + +// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq +// contains an interface name and a union of arbitrary data which can be +// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq +// function. +// +// Use the Name method to access the stored interface name. 
The union data +// fields can be get and set using the following methods: +// - Uint16/SetUint16: flags +// - Uint32/SetUint32: ifindex, metric, mtu +type Ifreq struct{ raw ifreq } + +// NewIfreq creates an Ifreq with the input network interface name after +// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required) +// bytes. +func NewIfreq(name string) (*Ifreq, error) { + // Leave room for terminating NULL byte. + if len(name) >= IFNAMSIZ { + return nil, EINVAL + } + + var ifr ifreq + copy(ifr.Ifrn[:], name) + + return &Ifreq{raw: ifr}, nil +} + +// TODO(mdlayher): get/set methods for sockaddr, char array, etc. + +// Name returns the interface name associated with the Ifreq. +func (ifr *Ifreq) Name() string { + // BytePtrToString requires a NULL terminator or the program may crash. If + // one is not present, just return the empty string. + if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { + return "" + } + + return BytePtrToString(&ifr.raw.Ifrn[0]) +} + +// Uint16 returns the Ifreq union data as a C short/Go uint16 value. +func (ifr *Ifreq) Uint16() uint16 { + return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) +} + +// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data. +func (ifr *Ifreq) SetUint16(v uint16) { + ifr.clear() + *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v +} + +// Uint32 returns the Ifreq union data as a C int/Go uint32 value. +func (ifr *Ifreq) Uint32() uint32 { + return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) +} + +// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data. +func (ifr *Ifreq) SetUint32(v uint32) { + ifr.clear() + *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v +} + +// clear zeroes the ifreq's union field to prevent trailing garbage data from +// being sent to the kernel if an ifreq is reused. +func (ifr *Ifreq) clear() { + for i := range ifr.raw.Ifru { + ifr.raw.Ifru[i] = 0 + } +} + +// TODO(mdlayher): export as IfreqData? 
For now we can provide helpers such as +// IoctlGetEthtoolDrvinfo which use these APIs under the hood. + +// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData, +// use the Ifreq.withData method. +type ifreqData struct { + name [IFNAMSIZ]byte + // A type separate from ifreq is required in order to comply with the + // unsafe.Pointer rules since the "pointer-ness" of data would not be + // preserved if it were cast into the byte array of a raw ifreq. + data unsafe.Pointer + // Pad to the same size as ifreq. + _ [len(ifreq{}.Ifru) - SizeofPtr]byte +} + +// withData produces an ifreqData with the pointer p set for ioctls which require +// arbitrary pointer data. +func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData { + return ifreqData{ + name: ifr.raw.Ifrn, + data: p, + } +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 48773f7..1dadead 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -5,7 +5,6 @@ package unix import ( - "runtime" "unsafe" ) @@ -22,56 +21,42 @@ func IoctlRetInt(fd int, req uint) (int, error) { func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime - err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value)) return &value, err } func IoctlSetRTCTime(fd int, value *RTCTime) error { - err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value)) } func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { var value RTCWkAlrm - err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, RTC_WKALM_RD, 
unsafe.Pointer(&value)) return &value, err } func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error { - err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err -} - -type ifreqEthtool struct { - name [IFNAMSIZ]byte - data unsafe.Pointer + return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value)) } // IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network // device specified by ifname. func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { - // Leave room for terminating NULL byte. - if len(ifname) >= IFNAMSIZ { - return nil, EINVAL + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err } - value := EthtoolDrvinfo{ - Cmd: ETHTOOL_GDRVINFO, - } - ifreq := ifreqEthtool{ - data: unsafe.Pointer(&value), - } - copy(ifreq.name[:], ifname) - err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq))) - runtime.KeepAlive(ifreq) + value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) return &value, err } @@ -80,7 +65,7 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { var value WatchdogInfo - err := ioctl(fd, WDIOC_GETSUPPORT, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value)) return &value, err } @@ -88,6 +73,7 @@ func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) { // more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. func IoctlWatchdogKeepalive(fd int) error { + // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr. return ioctl(fd, WDIOC_KEEPALIVE, 0) } @@ -95,9 +81,7 @@ func IoctlWatchdogKeepalive(fd int) error { // range of data conveyed in value to the file associated with the file // descriptor destFd. 
See the ioctl_ficlonerange(2) man page for details. func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { - err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value)) } // IoctlFileClone performs an FICLONE ioctl operation to clone the entire file @@ -148,7 +132,7 @@ func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { rawinfo.Reserved = value.Info[i].Reserved } - err := ioctl(srcFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(&buf[0]))) + err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0])) // Output for i := range value.Info { @@ -166,31 +150,47 @@ func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error { } func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error { - err := ioctl(fd, HIDIOCGRDESC, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value)) } func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) { var value HIDRawDevInfo - err := ioctl(fd, HIDIOCGRAWINFO, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value)) return &value, err } func IoctlHIDGetRawName(fd int) (string, error) { var value [_HIDIOCGRAWNAME_LEN]byte - err := ioctl(fd, _HIDIOCGRAWNAME, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } func IoctlHIDGetRawPhys(fd int) (string, error) { var value [_HIDIOCGRAWPHYS_LEN]byte - err := ioctl(fd, _HIDIOCGRAWPHYS, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0])) return ByteSliceToString(value[:]), err } func IoctlHIDGetRawUniq(fd int) (string, error) { var value [_HIDIOCGRAWUNIQ_LEN]byte - err := ioctl(fd, _HIDIOCGRAWUNIQ, uintptr(unsafe.Pointer(&value[0]))) + err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0])) 
return ByteSliceToString(value[:]), err } + +// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or +// output. See the netdevice(7) man page for details. +func IoctlIfreq(fd int, req uint, value *Ifreq) error { + // It is possible we will add more fields to *Ifreq itself later to prevent + // misuse, so pass the raw *ifreq directly. + return ioctlPtr(fd, req, unsafe.Pointer(&value.raw)) +} + +// TODO(mdlayher): export if and when IfreqData is exported. + +// ioctlIfreqData performs an ioctl using an ifreqData structure for input +// and/or output. See the netdevice(7) man page for details. +func ioctlIfreqData(fd int, req uint, value *ifreqData) error { + // The memory layout of IfreqData (type-safe) and ifreq (not type-safe) are + // identical so pass *IfreqData directly. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6e6afca..2ed4b6d 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -217,8 +217,6 @@ struct ltchars { #include #include #include -#include -#include #include #include #include diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 41b91fd..43569fe 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -66,11 +66,18 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { return fchmodat(dirfd, path, mode) } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. -// These are defined in ioctl.go and ioctl_linux.go. 
+// ioctl itself should not be exposed directly, but additional get/set functions +// for specific types are permissible. These are defined in ioctl.go and +// ioctl_linux.go. +// +// The third argument to ioctl is often a pointer but sometimes an integer. +// Callers should use ioctlPtr when the third argument is a pointer and ioctl +// when the third argument is an integer. +// +// TODO: some existing code incorrectly uses ioctl when it should use ioctlPtr. //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) @@ -1859,7 +1866,7 @@ func Getpgrp() (pid int) { //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index b430536..91317d7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -105,7 +105,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -133,7 +133,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource 
int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 39a864d..b961a62 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -184,7 +184,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -212,7 +212,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 7f27ebf..4b977ba 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -171,7 +171,7 @@ func Pipe2(p []int, flags int) (err error) { // Getrlimit prefers the prlimit64 system call. See issue 38604. func Getrlimit(resource int, rlim *Rlimit) error { - err := prlimit(0, resource, nil, rlim) + err := Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -180,7 +180,7 @@ func Getrlimit(resource int, rlim *Rlimit) error { // Setrlimit prefers the prlimit64 system call. See issue 38604. 
func Setrlimit(resource int, rlim *Rlimit) error { - err := prlimit(0, resource, rlim, nil) + err := Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 3a5621e..21d74e2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -157,7 +157,7 @@ type rlimit32 struct { //sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -185,7 +185,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index cf0d36f..e475d09 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -143,7 +143,7 @@ const rlimInf32 = ^uint32(0) const rlimInf64 = ^uint64(0) func Getrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, nil, rlim) + err = Prlimit(0, resource, nil, rlim) if err != ENOSYS { return err } @@ -171,7 +171,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { //sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = prlimit(0, resource, rlim, nil) + err = Prlimit(0, resource, rlim, nil) if err != ENOSYS { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 77fcde7..d2a6495 
100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,7 +13,10 @@ package unix import ( + "fmt" + "os" "runtime" + "sync" "syscall" "unsafe" ) @@ -744,3 +747,240 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } + +// Event Ports + +type fileObjCookie struct { + fobj *fileObj + cookie interface{} +} + +// EventPort provides a safe abstraction on top of Solaris/illumos Event Ports. +type EventPort struct { + port int + mu sync.Mutex + fds map[uintptr]interface{} + paths map[string]*fileObjCookie +} + +// PortEvent is an abstraction of the port_event C struct. +// Compare Source against PORT_SOURCE_FILE or PORT_SOURCE_FD +// to see if Path or Fd was the event source. The other will be +// uninitialized. +type PortEvent struct { + Cookie interface{} + Events int32 + Fd uintptr + Path string + Source uint16 + fobj *fileObj +} + +// NewEventPort creates a new EventPort including the +// underlying call to port_create(3c). +func NewEventPort() (*EventPort, error) { + port, err := port_create() + if err != nil { + return nil, err + } + e := &EventPort{ + port: port, + fds: make(map[uintptr]interface{}), + paths: make(map[string]*fileObjCookie), + } + return e, nil +} + +//sys port_create() (n int, err error) +//sys port_associate(port int, source int, object uintptr, events int, user *byte) (n int, err error) +//sys port_dissociate(port int, source int, object uintptr) (n int, err error) +//sys port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) +//sys port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Timespec) (n int, err error) + +// Close closes the event port. +func (e *EventPort) Close() error { + e.mu.Lock() + defer e.mu.Unlock() + e.fds = nil + e.paths = nil + return Close(e.port) +} + +// PathIsWatched checks to see if path is associated with this EventPort. 
+func (e *EventPort) PathIsWatched(path string) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, found := e.paths[path] + return found +} + +// FdIsWatched checks to see if fd is associated with this EventPort. +func (e *EventPort) FdIsWatched(fd uintptr) bool { + e.mu.Lock() + defer e.mu.Unlock() + _, found := e.fds[fd] + return found +} + +// AssociatePath wraps port_associate(3c) for a filesystem path including +// creating the necessary file_obj from the provided stat information. +func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, cookie interface{}) error { + e.mu.Lock() + defer e.mu.Unlock() + if _, found := e.paths[path]; found { + return fmt.Errorf("%v is already associated with this Event Port", path) + } + fobj, err := createFileObj(path, stat) + if err != nil { + return err + } + fCookie := &fileObjCookie{fobj, cookie} + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + if err != nil { + return err + } + e.paths[path] = fCookie + return nil +} + +// DissociatePath wraps port_dissociate(3c) for a filesystem path. +func (e *EventPort) DissociatePath(path string) error { + e.mu.Lock() + defer e.mu.Unlock() + f, ok := e.paths[path] + if !ok { + return fmt.Errorf("%v is not associated with this Event Port", path) + } + _, err := port_dissociate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(f.fobj))) + if err != nil { + return err + } + delete(e.paths, path) + return nil +} + +// AssociateFd wraps calls to port_associate(3c) on file descriptors. 
+func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) error { + e.mu.Lock() + defer e.mu.Unlock() + if _, found := e.fds[fd]; found { + return fmt.Errorf("%v is already associated with this Event Port", fd) + } + pcookie := &cookie + _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(pcookie))) + if err != nil { + return err + } + e.fds[fd] = pcookie + return nil +} + +// DissociateFd wraps calls to port_dissociate(3c) on file descriptors. +func (e *EventPort) DissociateFd(fd uintptr) error { + e.mu.Lock() + defer e.mu.Unlock() + _, ok := e.fds[fd] + if !ok { + return fmt.Errorf("%v is not associated with this Event Port", fd) + } + _, err := port_dissociate(e.port, PORT_SOURCE_FD, fd) + if err != nil { + return err + } + delete(e.fds, fd) + return nil +} + +func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { + fobj := new(fileObj) + bs, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fobj.Atim.Sec = s.Atim.Sec + fobj.Atim.Nsec = s.Atim.Nsec + fobj.Mtim.Sec = s.Mtim.Sec + fobj.Mtim.Nsec = s.Mtim.Nsec + fobj.Ctim.Sec = s.Ctim.Sec + fobj.Ctim.Nsec = s.Ctim.Nsec + return fobj, nil +} + +// GetOne wraps port_get(3c) and returns a single PortEvent. 
+func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { + pe := new(portEvent) + _, err := port_get(e.port, pe, t) + if err != nil { + return nil, err + } + p := new(PortEvent) + p.Events = pe.Events + p.Source = pe.Source + e.mu.Lock() + defer e.mu.Unlock() + switch pe.Source { + case PORT_SOURCE_FD: + p.Fd = uintptr(pe.Object) + cookie := (*interface{})(unsafe.Pointer(pe.User)) + p.Cookie = *cookie + delete(e.fds, p.Fd) + case PORT_SOURCE_FILE: + p.fobj = (*fileObj)(unsafe.Pointer(uintptr(pe.Object))) + p.Path = BytePtrToString((*byte)(unsafe.Pointer(p.fobj.Name))) + cookie := (*interface{})(unsafe.Pointer(pe.User)) + p.Cookie = *cookie + delete(e.paths, p.Path) + } + return p, nil +} + +// Pending wraps port_getn(3c) and returns how many events are pending. +func (e *EventPort) Pending() (int, error) { + var n uint32 = 0 + _, err := port_getn(e.port, nil, 0, &n, nil) + return int(n), err +} + +// Get wraps port_getn(3c) and fills a slice of PortEvent. +// It will block until either min events have been received +// or the timeout has been exceeded. It will return how many +// events were actually received along with any error information. +func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) { + if min == 0 { + return 0, fmt.Errorf("need to request at least one event or use Pending() instead") + } + if len(s) < min { + return 0, fmt.Errorf("len(s) (%d) is less than min events requested (%d)", len(s), min) + } + got := uint32(min) + max := uint32(len(s)) + var err error + ps := make([]portEvent, max, max) + _, err = port_getn(e.port, &ps[0], max, &got, timeout) + // got will be trustworthy with ETIME, but not any other error. 
+ if err != nil && err != ETIME { + return 0, err + } + e.mu.Lock() + defer e.mu.Unlock() + for i := 0; i < int(got); i++ { + s[i].Events = ps[i].Events + s[i].Source = ps[i].Source + switch ps[i].Source { + case PORT_SOURCE_FD: + s[i].Fd = uintptr(ps[i].Object) + cookie := (*interface{})(unsafe.Pointer(ps[i].User)) + s[i].Cookie = *cookie + delete(e.fds, s[i].Fd) + case PORT_SOURCE_FILE: + s[i].fobj = (*fileObj)(unsafe.Pointer(uintptr(ps[i].Object))) + s[i].Path = BytePtrToString((*byte)(unsafe.Pointer(s[i].fobj.Name))) + cookie := (*interface{})(unsafe.Pointer(ps[i].User)) + s[i].Cookie = *cookie + delete(e.paths, s[i].Path) + } + } + return int(got), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index a7618ce..cf296a2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -313,6 +313,10 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +func Send(s int, buf []byte, flags int) (err error) { + return sendto(s, buf, flags, nil, 0) +} + func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { ptr, n, err := to.sockaddr() if err != nil { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 52f5bbc..5ed10c4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -228,6 +228,8 @@ const ( BPF_OR = 0x40 BPF_PSEUDO_BTF_ID = 0x3 BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_FUNC = 0x4 + BPF_PSEUDO_KFUNC_CALL = 0x2 BPF_PSEUDO_MAP_FD = 0x1 BPF_PSEUDO_MAP_VALUE = 0x2 BPF_RET = 0x6 @@ -475,6 +477,8 @@ const ( DM_LIST_VERSIONS = 0xc138fd0d DM_MAX_TYPE_NAME = 0x10 DM_NAME_LEN = 0x80 + DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID = 0x2 + DM_NAME_LIST_FLAG_HAS_UUID = 0x1 DM_NOFLUSH_FLAG = 0x800 DM_PERSISTENT_DEV_FLAG = 0x8 DM_QUERY_INACTIVE_TABLE_FLAG = 0x1000 @@ -494,9 +498,9 @@ const ( DM_UUID_FLAG = 0x4000 
DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-02-01)" + DM_VERSION_EXTRA = "-ioctl (2021-03-22)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2c + DM_VERSION_MINOR = 0x2d DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -981,12 +985,6 @@ const ( HPFS_SUPER_MAGIC = 0xf995e849 HUGETLBFS_MAGIC = 0x958458f6 IBSHIFT = 0x10 - ICMPV6_FILTER = 0x1 - ICMPV6_FILTER_BLOCK = 0x1 - ICMPV6_FILTER_BLOCKOTHERS = 0x3 - ICMPV6_FILTER_PASS = 0x2 - ICMPV6_FILTER_PASSONLY = 0x4 - ICMP_FILTER = 0x1 ICRNL = 0x100 IFA_F_DADFAILED = 0x8 IFA_F_DEPRECATED = 0x20 @@ -1257,6 +1255,7 @@ const ( KEXEC_ARCH_PARISC = 0xf0000 KEXEC_ARCH_PPC = 0x140000 KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_RISCV = 0xf30000 KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 @@ -1756,14 +1755,19 @@ const ( PERF_ATTR_SIZE_VER4 = 0x68 PERF_ATTR_SIZE_VER5 = 0x70 PERF_ATTR_SIZE_VER6 = 0x78 + PERF_ATTR_SIZE_VER7 = 0x80 PERF_AUX_FLAG_COLLISION = 0x8 + PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT = 0x0 + PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW = 0x100 PERF_AUX_FLAG_OVERWRITE = 0x2 PERF_AUX_FLAG_PARTIAL = 0x4 + PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 PERF_FLAG_PID_CGROUP = 0x4 + PERF_HW_EVENT_MASK = 0xffffffff PERF_MAX_CONTEXTS_PER_STACK = 0x8 PERF_MAX_STACK_DEPTH = 0x7f PERF_MEM_BLK_ADDR = 0x4 @@ -1822,6 +1826,7 @@ const ( PERF_MEM_TLB_OS = 0x40 PERF_MEM_TLB_SHIFT = 0x1a PERF_MEM_TLB_WK = 0x20 + PERF_PMU_TYPE_SHIFT = 0x20 PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER = 0x1 PERF_RECORD_MISC_COMM_EXEC = 0x2000 PERF_RECORD_MISC_CPUMODE_MASK = 0x7 @@ -1921,7 +1926,9 @@ const ( PR_PAC_APGAKEY = 0x10 PR_PAC_APIAKEY = 0x1 PR_PAC_APIBKEY = 0x2 + PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 + PR_PAC_SET_ENABLED_KEYS = 0x3c PR_SET_CHILD_SUBREAPER = 0x24 PR_SET_DUMPABLE = 0x4 PR_SET_ENDIAN = 0x14 @@ -2003,6 +2010,7 @@ const ( PTRACE_GETREGSET = 0x4204 
PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_RSEQ_CONFIGURATION = 0x420f PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -2163,6 +2171,7 @@ const ( RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 + RTM_DELNEXTHOPBUCKET = 0x75 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -2193,6 +2202,7 @@ const ( RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 RTM_GETNEXTHOP = 0x6a + RTM_GETNEXTHOPBUCKET = 0x76 RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -2201,7 +2211,7 @@ const ( RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e RTM_GETVLAN = 0x72 - RTM_MAX = 0x73 + RTM_MAX = 0x77 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2215,6 +2225,7 @@ const ( RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 RTM_NEWNEXTHOP = 0x68 + RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 @@ -2224,8 +2235,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x19 - RTM_NR_MSGTYPES = 0x64 + RTM_NR_FAMILIES = 0x1a + RTM_NR_MSGTYPES = 0x68 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2253,6 +2264,7 @@ const ( RTPROT_MROUTED = 0x11 RTPROT_MRT = 0xa RTPROT_NTK = 0xf + RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 09fc559..cca248d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 75730cc..9521a48 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 127cf17..ddb40a4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 957ca1f..3df31e0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -148,6 +148,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 314a205..179c7d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 457e8de..84ab15a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f 
OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 33cd28f..6aa064d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 0e085ba..960650f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1b5928c..7365221 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f3a41d6..5967db3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 6a5a555..f888698 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -147,6 +147,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index a4da67e..8048706 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index a7028e0..fb78594 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -145,6 +145,7 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x400c4d19 OTPGETREGIONCOUNT = 0x40044d0e OTPGETREGIONINFO = 0x400c4d0f OTPLOCK = 0x800c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ed3b328..81e18d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -150,6 +150,7 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPERASE = 0x800c4d19 OTPGETREGIONCOUNT = 0x80044d0e OTPGETREGIONINFO = 0x800c4d0f OTPLOCK = 0x400c4d10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 593cc0f..6d56edc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1020,7 +1020,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 
RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index a4e4c22..aef6c08 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1020,7 +1020,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 7305cc9..2dbe3da 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -48,6 +48,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -1201,7 +1211,7 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { +func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4e18d5c..b5f926c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,11 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_port_create port_create "libc.so" +//go:cgo_import_dynamic libc_port_associate port_associate "libc.so" +//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" +//go:cgo_import_dynamic libc_port_get port_get "libc.so" +//go:cgo_import_dynamic libc_port_getn port_getn "libc.so" //go:linkname procpipe libc_pipe //go:linkname procpipe2 libc_pipe2 @@ -272,6 +277,11 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procport_create libc_port_create +//go:linkname procport_associate libc_port_associate +//go:linkname procport_dissociate libc_port_dissociate +//go:linkname procport_get libc_port_get +//go:linkname procport_getn libc_port_getn var ( procpipe, @@ -403,7 +413,12 @@ var ( proc__xnet_getsockopt, procgetpeername, procsetsockopt, - procrecvfrom syscallFunc + procrecvfrom, + procport_create, + procport_associate, + procport_dissociate, + procport_get, + procport_getn syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1981,3 +1996,58 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_create() (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_associate(port int, source int, object uintptr, events int, user *byte) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_dissociate(port int, source int, object uintptr) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Timespec) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fbc59b7..eb3afe6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -439,4 +439,7 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 04d16d7..8e7e3ae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -7,358 +7,361 @@ package unix const ( - SYS_READ = 0 - SYS_WRITE = 1 - SYS_OPEN = 2 - SYS_CLOSE = 3 - SYS_STAT = 4 - SYS_FSTAT = 5 - SYS_LSTAT = 6 - SYS_POLL = 7 - SYS_LSEEK = 8 - SYS_MMAP = 9 - SYS_MPROTECT = 10 - SYS_MUNMAP = 11 - SYS_BRK = 12 - SYS_RT_SIGACTION = 13 - SYS_RT_SIGPROCMASK = 14 - SYS_RT_SIGRETURN = 15 - SYS_IOCTL = 16 - SYS_PREAD64 = 17 - SYS_PWRITE64 = 18 - SYS_READV = 19 - SYS_WRITEV = 20 - SYS_ACCESS = 21 - SYS_PIPE = 22 - SYS_SELECT = 23 - SYS_SCHED_YIELD = 24 - SYS_MREMAP = 25 - SYS_MSYNC = 26 - SYS_MINCORE = 27 - SYS_MADVISE = 28 - SYS_SHMGET = 29 - SYS_SHMAT = 30 - SYS_SHMCTL = 31 - SYS_DUP = 32 - SYS_DUP2 = 33 - SYS_PAUSE = 34 - SYS_NANOSLEEP = 35 - SYS_GETITIMER = 36 - SYS_ALARM = 37 - SYS_SETITIMER = 38 - SYS_GETPID = 39 - SYS_SENDFILE = 40 - SYS_SOCKET = 41 - SYS_CONNECT = 42 - SYS_ACCEPT = 43 - SYS_SENDTO = 44 - SYS_RECVFROM = 45 - SYS_SENDMSG = 46 - SYS_RECVMSG = 47 - SYS_SHUTDOWN = 48 - SYS_BIND = 49 - SYS_LISTEN = 50 - SYS_GETSOCKNAME = 51 - SYS_GETPEERNAME = 52 - SYS_SOCKETPAIR = 53 - SYS_SETSOCKOPT = 54 - SYS_GETSOCKOPT = 55 - SYS_CLONE = 56 - SYS_FORK = 57 - SYS_VFORK = 58 - SYS_EXECVE = 59 - SYS_EXIT = 60 - SYS_WAIT4 = 61 - SYS_KILL = 62 - SYS_UNAME = 63 - SYS_SEMGET = 64 - SYS_SEMOP = 65 - SYS_SEMCTL = 66 - SYS_SHMDT = 67 - SYS_MSGGET = 68 - SYS_MSGSND = 69 - SYS_MSGRCV = 70 - SYS_MSGCTL = 71 - SYS_FCNTL = 72 - SYS_FLOCK = 73 - SYS_FSYNC = 74 - SYS_FDATASYNC = 75 - SYS_TRUNCATE = 76 - SYS_FTRUNCATE = 77 - SYS_GETDENTS = 78 - SYS_GETCWD = 79 - SYS_CHDIR = 80 - SYS_FCHDIR = 81 - SYS_RENAME = 82 - SYS_MKDIR = 83 - SYS_RMDIR = 84 - SYS_CREAT = 85 - SYS_LINK = 86 - SYS_UNLINK = 87 - SYS_SYMLINK = 88 - SYS_READLINK = 89 - SYS_CHMOD = 90 - SYS_FCHMOD = 91 - SYS_CHOWN = 92 - 
SYS_FCHOWN = 93 - SYS_LCHOWN = 94 - SYS_UMASK = 95 - SYS_GETTIMEOFDAY = 96 - SYS_GETRLIMIT = 97 - SYS_GETRUSAGE = 98 - SYS_SYSINFO = 99 - SYS_TIMES = 100 - SYS_PTRACE = 101 - SYS_GETUID = 102 - SYS_SYSLOG = 103 - SYS_GETGID = 104 - SYS_SETUID = 105 - SYS_SETGID = 106 - SYS_GETEUID = 107 - SYS_GETEGID = 108 - SYS_SETPGID = 109 - SYS_GETPPID = 110 - SYS_GETPGRP = 111 - SYS_SETSID = 112 - SYS_SETREUID = 113 - SYS_SETREGID = 114 - SYS_GETGROUPS = 115 - SYS_SETGROUPS = 116 - SYS_SETRESUID = 117 - SYS_GETRESUID = 118 - SYS_SETRESGID = 119 - SYS_GETRESGID = 120 - SYS_GETPGID = 121 - SYS_SETFSUID = 122 - SYS_SETFSGID = 123 - SYS_GETSID = 124 - SYS_CAPGET = 125 - SYS_CAPSET = 126 - SYS_RT_SIGPENDING = 127 - SYS_RT_SIGTIMEDWAIT = 128 - SYS_RT_SIGQUEUEINFO = 129 - SYS_RT_SIGSUSPEND = 130 - SYS_SIGALTSTACK = 131 - SYS_UTIME = 132 - SYS_MKNOD = 133 - SYS_USELIB = 134 - SYS_PERSONALITY = 135 - SYS_USTAT = 136 - SYS_STATFS = 137 - SYS_FSTATFS = 138 - SYS_SYSFS = 139 - SYS_GETPRIORITY = 140 - SYS_SETPRIORITY = 141 - SYS_SCHED_SETPARAM = 142 - SYS_SCHED_GETPARAM = 143 - SYS_SCHED_SETSCHEDULER = 144 - SYS_SCHED_GETSCHEDULER = 145 - SYS_SCHED_GET_PRIORITY_MAX = 146 - SYS_SCHED_GET_PRIORITY_MIN = 147 - SYS_SCHED_RR_GET_INTERVAL = 148 - SYS_MLOCK = 149 - SYS_MUNLOCK = 150 - SYS_MLOCKALL = 151 - SYS_MUNLOCKALL = 152 - SYS_VHANGUP = 153 - SYS_MODIFY_LDT = 154 - SYS_PIVOT_ROOT = 155 - SYS__SYSCTL = 156 - SYS_PRCTL = 157 - SYS_ARCH_PRCTL = 158 - SYS_ADJTIMEX = 159 - SYS_SETRLIMIT = 160 - SYS_CHROOT = 161 - SYS_SYNC = 162 - SYS_ACCT = 163 - SYS_SETTIMEOFDAY = 164 - SYS_MOUNT = 165 - SYS_UMOUNT2 = 166 - SYS_SWAPON = 167 - SYS_SWAPOFF = 168 - SYS_REBOOT = 169 - SYS_SETHOSTNAME = 170 - SYS_SETDOMAINNAME = 171 - SYS_IOPL = 172 - SYS_IOPERM = 173 - SYS_CREATE_MODULE = 174 - SYS_INIT_MODULE = 175 - SYS_DELETE_MODULE = 176 - SYS_GET_KERNEL_SYMS = 177 - SYS_QUERY_MODULE = 178 - SYS_QUOTACTL = 179 - SYS_NFSSERVCTL = 180 - SYS_GETPMSG = 181 - SYS_PUTPMSG = 182 - SYS_AFS_SYSCALL = 183 - SYS_TUXCALL = 
184 - SYS_SECURITY = 185 - SYS_GETTID = 186 - SYS_READAHEAD = 187 - SYS_SETXATTR = 188 - SYS_LSETXATTR = 189 - SYS_FSETXATTR = 190 - SYS_GETXATTR = 191 - SYS_LGETXATTR = 192 - SYS_FGETXATTR = 193 - SYS_LISTXATTR = 194 - SYS_LLISTXATTR = 195 - SYS_FLISTXATTR = 196 - SYS_REMOVEXATTR = 197 - SYS_LREMOVEXATTR = 198 - SYS_FREMOVEXATTR = 199 - SYS_TKILL = 200 - SYS_TIME = 201 - SYS_FUTEX = 202 - SYS_SCHED_SETAFFINITY = 203 - SYS_SCHED_GETAFFINITY = 204 - SYS_SET_THREAD_AREA = 205 - SYS_IO_SETUP = 206 - SYS_IO_DESTROY = 207 - SYS_IO_GETEVENTS = 208 - SYS_IO_SUBMIT = 209 - SYS_IO_CANCEL = 210 - SYS_GET_THREAD_AREA = 211 - SYS_LOOKUP_DCOOKIE = 212 - SYS_EPOLL_CREATE = 213 - SYS_EPOLL_CTL_OLD = 214 - SYS_EPOLL_WAIT_OLD = 215 - SYS_REMAP_FILE_PAGES = 216 - SYS_GETDENTS64 = 217 - SYS_SET_TID_ADDRESS = 218 - SYS_RESTART_SYSCALL = 219 - SYS_SEMTIMEDOP = 220 - SYS_FADVISE64 = 221 - SYS_TIMER_CREATE = 222 - SYS_TIMER_SETTIME = 223 - SYS_TIMER_GETTIME = 224 - SYS_TIMER_GETOVERRUN = 225 - SYS_TIMER_DELETE = 226 - SYS_CLOCK_SETTIME = 227 - SYS_CLOCK_GETTIME = 228 - SYS_CLOCK_GETRES = 229 - SYS_CLOCK_NANOSLEEP = 230 - SYS_EXIT_GROUP = 231 - SYS_EPOLL_WAIT = 232 - SYS_EPOLL_CTL = 233 - SYS_TGKILL = 234 - SYS_UTIMES = 235 - SYS_VSERVER = 236 - SYS_MBIND = 237 - SYS_SET_MEMPOLICY = 238 - SYS_GET_MEMPOLICY = 239 - SYS_MQ_OPEN = 240 - SYS_MQ_UNLINK = 241 - SYS_MQ_TIMEDSEND = 242 - SYS_MQ_TIMEDRECEIVE = 243 - SYS_MQ_NOTIFY = 244 - SYS_MQ_GETSETATTR = 245 - SYS_KEXEC_LOAD = 246 - SYS_WAITID = 247 - SYS_ADD_KEY = 248 - SYS_REQUEST_KEY = 249 - SYS_KEYCTL = 250 - SYS_IOPRIO_SET = 251 - SYS_IOPRIO_GET = 252 - SYS_INOTIFY_INIT = 253 - SYS_INOTIFY_ADD_WATCH = 254 - SYS_INOTIFY_RM_WATCH = 255 - SYS_MIGRATE_PAGES = 256 - SYS_OPENAT = 257 - SYS_MKDIRAT = 258 - SYS_MKNODAT = 259 - SYS_FCHOWNAT = 260 - SYS_FUTIMESAT = 261 - SYS_NEWFSTATAT = 262 - SYS_UNLINKAT = 263 - SYS_RENAMEAT = 264 - SYS_LINKAT = 265 - SYS_SYMLINKAT = 266 - SYS_READLINKAT = 267 - SYS_FCHMODAT = 268 - SYS_FACCESSAT = 269 - 
SYS_PSELECT6 = 270 - SYS_PPOLL = 271 - SYS_UNSHARE = 272 - SYS_SET_ROBUST_LIST = 273 - SYS_GET_ROBUST_LIST = 274 - SYS_SPLICE = 275 - SYS_TEE = 276 - SYS_SYNC_FILE_RANGE = 277 - SYS_VMSPLICE = 278 - SYS_MOVE_PAGES = 279 - SYS_UTIMENSAT = 280 - SYS_EPOLL_PWAIT = 281 - SYS_SIGNALFD = 282 - SYS_TIMERFD_CREATE = 283 - SYS_EVENTFD = 284 - SYS_FALLOCATE = 285 - SYS_TIMERFD_SETTIME = 286 - SYS_TIMERFD_GETTIME = 287 - SYS_ACCEPT4 = 288 - SYS_SIGNALFD4 = 289 - SYS_EVENTFD2 = 290 - SYS_EPOLL_CREATE1 = 291 - SYS_DUP3 = 292 - SYS_PIPE2 = 293 - SYS_INOTIFY_INIT1 = 294 - SYS_PREADV = 295 - SYS_PWRITEV = 296 - SYS_RT_TGSIGQUEUEINFO = 297 - SYS_PERF_EVENT_OPEN = 298 - SYS_RECVMMSG = 299 - SYS_FANOTIFY_INIT = 300 - SYS_FANOTIFY_MARK = 301 - SYS_PRLIMIT64 = 302 - SYS_NAME_TO_HANDLE_AT = 303 - SYS_OPEN_BY_HANDLE_AT = 304 - SYS_CLOCK_ADJTIME = 305 - SYS_SYNCFS = 306 - SYS_SENDMMSG = 307 - SYS_SETNS = 308 - SYS_GETCPU = 309 - SYS_PROCESS_VM_READV = 310 - SYS_PROCESS_VM_WRITEV = 311 - SYS_KCMP = 312 - SYS_FINIT_MODULE = 313 - SYS_SCHED_SETATTR = 314 - SYS_SCHED_GETATTR = 315 - SYS_RENAMEAT2 = 316 - SYS_SECCOMP = 317 - SYS_GETRANDOM = 318 - SYS_MEMFD_CREATE = 319 - SYS_KEXEC_FILE_LOAD = 320 - SYS_BPF = 321 - SYS_EXECVEAT = 322 - SYS_USERFAULTFD = 323 - SYS_MEMBARRIER = 324 - SYS_MLOCK2 = 325 - SYS_COPY_FILE_RANGE = 326 - SYS_PREADV2 = 327 - SYS_PWRITEV2 = 328 - SYS_PKEY_MPROTECT = 329 - SYS_PKEY_ALLOC = 330 - SYS_PKEY_FREE = 331 - SYS_STATX = 332 - SYS_IO_PGETEVENTS = 333 - SYS_RSEQ = 334 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_READ = 0 + SYS_WRITE = 1 + SYS_OPEN = 2 + SYS_CLOSE 
= 3 + SYS_STAT = 4 + SYS_FSTAT = 5 + SYS_LSTAT = 6 + SYS_POLL = 7 + SYS_LSEEK = 8 + SYS_MMAP = 9 + SYS_MPROTECT = 10 + SYS_MUNMAP = 11 + SYS_BRK = 12 + SYS_RT_SIGACTION = 13 + SYS_RT_SIGPROCMASK = 14 + SYS_RT_SIGRETURN = 15 + SYS_IOCTL = 16 + SYS_PREAD64 = 17 + SYS_PWRITE64 = 18 + SYS_READV = 19 + SYS_WRITEV = 20 + SYS_ACCESS = 21 + SYS_PIPE = 22 + SYS_SELECT = 23 + SYS_SCHED_YIELD = 24 + SYS_MREMAP = 25 + SYS_MSYNC = 26 + SYS_MINCORE = 27 + SYS_MADVISE = 28 + SYS_SHMGET = 29 + SYS_SHMAT = 30 + SYS_SHMCTL = 31 + SYS_DUP = 32 + SYS_DUP2 = 33 + SYS_PAUSE = 34 + SYS_NANOSLEEP = 35 + SYS_GETITIMER = 36 + SYS_ALARM = 37 + SYS_SETITIMER = 38 + SYS_GETPID = 39 + SYS_SENDFILE = 40 + SYS_SOCKET = 41 + SYS_CONNECT = 42 + SYS_ACCEPT = 43 + SYS_SENDTO = 44 + SYS_RECVFROM = 45 + SYS_SENDMSG = 46 + SYS_RECVMSG = 47 + SYS_SHUTDOWN = 48 + SYS_BIND = 49 + SYS_LISTEN = 50 + SYS_GETSOCKNAME = 51 + SYS_GETPEERNAME = 52 + SYS_SOCKETPAIR = 53 + SYS_SETSOCKOPT = 54 + SYS_GETSOCKOPT = 55 + SYS_CLONE = 56 + SYS_FORK = 57 + SYS_VFORK = 58 + SYS_EXECVE = 59 + SYS_EXIT = 60 + SYS_WAIT4 = 61 + SYS_KILL = 62 + SYS_UNAME = 63 + SYS_SEMGET = 64 + SYS_SEMOP = 65 + SYS_SEMCTL = 66 + SYS_SHMDT = 67 + SYS_MSGGET = 68 + SYS_MSGSND = 69 + SYS_MSGRCV = 70 + SYS_MSGCTL = 71 + SYS_FCNTL = 72 + SYS_FLOCK = 73 + SYS_FSYNC = 74 + SYS_FDATASYNC = 75 + SYS_TRUNCATE = 76 + SYS_FTRUNCATE = 77 + SYS_GETDENTS = 78 + SYS_GETCWD = 79 + SYS_CHDIR = 80 + SYS_FCHDIR = 81 + SYS_RENAME = 82 + SYS_MKDIR = 83 + SYS_RMDIR = 84 + SYS_CREAT = 85 + SYS_LINK = 86 + SYS_UNLINK = 87 + SYS_SYMLINK = 88 + SYS_READLINK = 89 + SYS_CHMOD = 90 + SYS_FCHMOD = 91 + SYS_CHOWN = 92 + SYS_FCHOWN = 93 + SYS_LCHOWN = 94 + SYS_UMASK = 95 + SYS_GETTIMEOFDAY = 96 + SYS_GETRLIMIT = 97 + SYS_GETRUSAGE = 98 + SYS_SYSINFO = 99 + SYS_TIMES = 100 + SYS_PTRACE = 101 + SYS_GETUID = 102 + SYS_SYSLOG = 103 + SYS_GETGID = 104 + SYS_SETUID = 105 + SYS_SETGID = 106 + SYS_GETEUID = 107 + SYS_GETEGID = 108 + SYS_SETPGID = 109 + SYS_GETPPID = 110 + SYS_GETPGRP 
= 111 + SYS_SETSID = 112 + SYS_SETREUID = 113 + SYS_SETREGID = 114 + SYS_GETGROUPS = 115 + SYS_SETGROUPS = 116 + SYS_SETRESUID = 117 + SYS_GETRESUID = 118 + SYS_SETRESGID = 119 + SYS_GETRESGID = 120 + SYS_GETPGID = 121 + SYS_SETFSUID = 122 + SYS_SETFSGID = 123 + SYS_GETSID = 124 + SYS_CAPGET = 125 + SYS_CAPSET = 126 + SYS_RT_SIGPENDING = 127 + SYS_RT_SIGTIMEDWAIT = 128 + SYS_RT_SIGQUEUEINFO = 129 + SYS_RT_SIGSUSPEND = 130 + SYS_SIGALTSTACK = 131 + SYS_UTIME = 132 + SYS_MKNOD = 133 + SYS_USELIB = 134 + SYS_PERSONALITY = 135 + SYS_USTAT = 136 + SYS_STATFS = 137 + SYS_FSTATFS = 138 + SYS_SYSFS = 139 + SYS_GETPRIORITY = 140 + SYS_SETPRIORITY = 141 + SYS_SCHED_SETPARAM = 142 + SYS_SCHED_GETPARAM = 143 + SYS_SCHED_SETSCHEDULER = 144 + SYS_SCHED_GETSCHEDULER = 145 + SYS_SCHED_GET_PRIORITY_MAX = 146 + SYS_SCHED_GET_PRIORITY_MIN = 147 + SYS_SCHED_RR_GET_INTERVAL = 148 + SYS_MLOCK = 149 + SYS_MUNLOCK = 150 + SYS_MLOCKALL = 151 + SYS_MUNLOCKALL = 152 + SYS_VHANGUP = 153 + SYS_MODIFY_LDT = 154 + SYS_PIVOT_ROOT = 155 + SYS__SYSCTL = 156 + SYS_PRCTL = 157 + SYS_ARCH_PRCTL = 158 + SYS_ADJTIMEX = 159 + SYS_SETRLIMIT = 160 + SYS_CHROOT = 161 + SYS_SYNC = 162 + SYS_ACCT = 163 + SYS_SETTIMEOFDAY = 164 + SYS_MOUNT = 165 + SYS_UMOUNT2 = 166 + SYS_SWAPON = 167 + SYS_SWAPOFF = 168 + SYS_REBOOT = 169 + SYS_SETHOSTNAME = 170 + SYS_SETDOMAINNAME = 171 + SYS_IOPL = 172 + SYS_IOPERM = 173 + SYS_CREATE_MODULE = 174 + SYS_INIT_MODULE = 175 + SYS_DELETE_MODULE = 176 + SYS_GET_KERNEL_SYMS = 177 + SYS_QUERY_MODULE = 178 + SYS_QUOTACTL = 179 + SYS_NFSSERVCTL = 180 + SYS_GETPMSG = 181 + SYS_PUTPMSG = 182 + SYS_AFS_SYSCALL = 183 + SYS_TUXCALL = 184 + SYS_SECURITY = 185 + SYS_GETTID = 186 + SYS_READAHEAD = 187 + SYS_SETXATTR = 188 + SYS_LSETXATTR = 189 + SYS_FSETXATTR = 190 + SYS_GETXATTR = 191 + SYS_LGETXATTR = 192 + SYS_FGETXATTR = 193 + SYS_LISTXATTR = 194 + SYS_LLISTXATTR = 195 + SYS_FLISTXATTR = 196 + SYS_REMOVEXATTR = 197 + SYS_LREMOVEXATTR = 198 + SYS_FREMOVEXATTR = 199 + SYS_TKILL = 200 + 
SYS_TIME = 201 + SYS_FUTEX = 202 + SYS_SCHED_SETAFFINITY = 203 + SYS_SCHED_GETAFFINITY = 204 + SYS_SET_THREAD_AREA = 205 + SYS_IO_SETUP = 206 + SYS_IO_DESTROY = 207 + SYS_IO_GETEVENTS = 208 + SYS_IO_SUBMIT = 209 + SYS_IO_CANCEL = 210 + SYS_GET_THREAD_AREA = 211 + SYS_LOOKUP_DCOOKIE = 212 + SYS_EPOLL_CREATE = 213 + SYS_EPOLL_CTL_OLD = 214 + SYS_EPOLL_WAIT_OLD = 215 + SYS_REMAP_FILE_PAGES = 216 + SYS_GETDENTS64 = 217 + SYS_SET_TID_ADDRESS = 218 + SYS_RESTART_SYSCALL = 219 + SYS_SEMTIMEDOP = 220 + SYS_FADVISE64 = 221 + SYS_TIMER_CREATE = 222 + SYS_TIMER_SETTIME = 223 + SYS_TIMER_GETTIME = 224 + SYS_TIMER_GETOVERRUN = 225 + SYS_TIMER_DELETE = 226 + SYS_CLOCK_SETTIME = 227 + SYS_CLOCK_GETTIME = 228 + SYS_CLOCK_GETRES = 229 + SYS_CLOCK_NANOSLEEP = 230 + SYS_EXIT_GROUP = 231 + SYS_EPOLL_WAIT = 232 + SYS_EPOLL_CTL = 233 + SYS_TGKILL = 234 + SYS_UTIMES = 235 + SYS_VSERVER = 236 + SYS_MBIND = 237 + SYS_SET_MEMPOLICY = 238 + SYS_GET_MEMPOLICY = 239 + SYS_MQ_OPEN = 240 + SYS_MQ_UNLINK = 241 + SYS_MQ_TIMEDSEND = 242 + SYS_MQ_TIMEDRECEIVE = 243 + SYS_MQ_NOTIFY = 244 + SYS_MQ_GETSETATTR = 245 + SYS_KEXEC_LOAD = 246 + SYS_WAITID = 247 + SYS_ADD_KEY = 248 + SYS_REQUEST_KEY = 249 + SYS_KEYCTL = 250 + SYS_IOPRIO_SET = 251 + SYS_IOPRIO_GET = 252 + SYS_INOTIFY_INIT = 253 + SYS_INOTIFY_ADD_WATCH = 254 + SYS_INOTIFY_RM_WATCH = 255 + SYS_MIGRATE_PAGES = 256 + SYS_OPENAT = 257 + SYS_MKDIRAT = 258 + SYS_MKNODAT = 259 + SYS_FCHOWNAT = 260 + SYS_FUTIMESAT = 261 + SYS_NEWFSTATAT = 262 + SYS_UNLINKAT = 263 + SYS_RENAMEAT = 264 + SYS_LINKAT = 265 + SYS_SYMLINKAT = 266 + SYS_READLINKAT = 267 + SYS_FCHMODAT = 268 + SYS_FACCESSAT = 269 + SYS_PSELECT6 = 270 + SYS_PPOLL = 271 + SYS_UNSHARE = 272 + SYS_SET_ROBUST_LIST = 273 + SYS_GET_ROBUST_LIST = 274 + SYS_SPLICE = 275 + SYS_TEE = 276 + SYS_SYNC_FILE_RANGE = 277 + SYS_VMSPLICE = 278 + SYS_MOVE_PAGES = 279 + SYS_UTIMENSAT = 280 + SYS_EPOLL_PWAIT = 281 + SYS_SIGNALFD = 282 + SYS_TIMERFD_CREATE = 283 + SYS_EVENTFD = 284 + SYS_FALLOCATE = 285 + 
SYS_TIMERFD_SETTIME = 286 + SYS_TIMERFD_GETTIME = 287 + SYS_ACCEPT4 = 288 + SYS_SIGNALFD4 = 289 + SYS_EVENTFD2 = 290 + SYS_EPOLL_CREATE1 = 291 + SYS_DUP3 = 292 + SYS_PIPE2 = 293 + SYS_INOTIFY_INIT1 = 294 + SYS_PREADV = 295 + SYS_PWRITEV = 296 + SYS_RT_TGSIGQUEUEINFO = 297 + SYS_PERF_EVENT_OPEN = 298 + SYS_RECVMMSG = 299 + SYS_FANOTIFY_INIT = 300 + SYS_FANOTIFY_MARK = 301 + SYS_PRLIMIT64 = 302 + SYS_NAME_TO_HANDLE_AT = 303 + SYS_OPEN_BY_HANDLE_AT = 304 + SYS_CLOCK_ADJTIME = 305 + SYS_SYNCFS = 306 + SYS_SENDMMSG = 307 + SYS_SETNS = 308 + SYS_GETCPU = 309 + SYS_PROCESS_VM_READV = 310 + SYS_PROCESS_VM_WRITEV = 311 + SYS_KCMP = 312 + SYS_FINIT_MODULE = 313 + SYS_SCHED_SETATTR = 314 + SYS_SCHED_GETATTR = 315 + SYS_RENAMEAT2 = 316 + SYS_SECCOMP = 317 + SYS_GETRANDOM = 318 + SYS_MEMFD_CREATE = 319 + SYS_KEXEC_FILE_LOAD = 320 + SYS_BPF = 321 + SYS_EXECVEAT = 322 + SYS_USERFAULTFD = 323 + SYS_MEMBARRIER = 324 + SYS_MLOCK2 = 325 + SYS_COPY_FILE_RANGE = 326 + SYS_PREADV2 = 327 + SYS_PWRITEV2 = 328 + SYS_PKEY_MPROTECT = 329 + SYS_PKEY_ALLOC = 330 + SYS_PKEY_FREE = 331 + SYS_STATX = 332 + SYS_IO_PGETEVENTS = 333 + SYS_RSEQ = 334 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3b1c105..0e6ebfe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -403,4 +403,7 @@ const ( SYS_PROCESS_MADVISE = 440 
SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 3198adc..cd2a3ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -7,303 +7,306 @@ package unix const ( - SYS_IO_SETUP = 0 - SYS_IO_DESTROY = 1 - SYS_IO_SUBMIT = 2 - SYS_IO_CANCEL = 3 - SYS_IO_GETEVENTS = 4 - SYS_SETXATTR = 5 - SYS_LSETXATTR = 6 - SYS_FSETXATTR = 7 - SYS_GETXATTR = 8 - SYS_LGETXATTR = 9 - SYS_FGETXATTR = 10 - SYS_LISTXATTR = 11 - SYS_LLISTXATTR = 12 - SYS_FLISTXATTR = 13 - SYS_REMOVEXATTR = 14 - SYS_LREMOVEXATTR = 15 - SYS_FREMOVEXATTR = 16 - SYS_GETCWD = 17 - SYS_LOOKUP_DCOOKIE = 18 - SYS_EVENTFD2 = 19 - SYS_EPOLL_CREATE1 = 20 - SYS_EPOLL_CTL = 21 - SYS_EPOLL_PWAIT = 22 - SYS_DUP = 23 - SYS_DUP3 = 24 - SYS_FCNTL = 25 - SYS_INOTIFY_INIT1 = 26 - SYS_INOTIFY_ADD_WATCH = 27 - SYS_INOTIFY_RM_WATCH = 28 - SYS_IOCTL = 29 - SYS_IOPRIO_SET = 30 - SYS_IOPRIO_GET = 31 - SYS_FLOCK = 32 - SYS_MKNODAT = 33 - SYS_MKDIRAT = 34 - SYS_UNLINKAT = 35 - SYS_SYMLINKAT = 36 - SYS_LINKAT = 37 - SYS_RENAMEAT = 38 - SYS_UMOUNT2 = 39 - SYS_MOUNT = 40 - SYS_PIVOT_ROOT = 41 - SYS_NFSSERVCTL = 42 - SYS_STATFS = 43 - SYS_FSTATFS = 44 - SYS_TRUNCATE = 45 - SYS_FTRUNCATE = 46 - SYS_FALLOCATE = 47 - SYS_FACCESSAT = 48 - SYS_CHDIR = 49 - SYS_FCHDIR = 50 - SYS_CHROOT = 51 - SYS_FCHMOD = 52 - SYS_FCHMODAT = 53 - SYS_FCHOWNAT = 54 - SYS_FCHOWN = 55 - SYS_OPENAT = 56 - SYS_CLOSE = 57 - SYS_VHANGUP = 58 - SYS_PIPE2 = 59 - SYS_QUOTACTL = 60 - SYS_GETDENTS64 = 61 - SYS_LSEEK = 62 - SYS_READ = 63 - SYS_WRITE = 64 - SYS_READV = 65 - SYS_WRITEV = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_PREADV = 69 - SYS_PWRITEV = 70 - SYS_SENDFILE = 71 - SYS_PSELECT6 = 72 - SYS_PPOLL = 73 - SYS_SIGNALFD4 = 74 - SYS_VMSPLICE = 75 - SYS_SPLICE = 76 - 
SYS_TEE = 77 - SYS_READLINKAT = 78 - SYS_FSTATAT = 79 - SYS_FSTAT = 80 - SYS_SYNC = 81 - SYS_FSYNC = 82 - SYS_FDATASYNC = 83 - SYS_SYNC_FILE_RANGE = 84 - SYS_TIMERFD_CREATE = 85 - SYS_TIMERFD_SETTIME = 86 - SYS_TIMERFD_GETTIME = 87 - SYS_UTIMENSAT = 88 - SYS_ACCT = 89 - SYS_CAPGET = 90 - SYS_CAPSET = 91 - SYS_PERSONALITY = 92 - SYS_EXIT = 93 - SYS_EXIT_GROUP = 94 - SYS_WAITID = 95 - SYS_SET_TID_ADDRESS = 96 - SYS_UNSHARE = 97 - SYS_FUTEX = 98 - SYS_SET_ROBUST_LIST = 99 - SYS_GET_ROBUST_LIST = 100 - SYS_NANOSLEEP = 101 - SYS_GETITIMER = 102 - SYS_SETITIMER = 103 - SYS_KEXEC_LOAD = 104 - SYS_INIT_MODULE = 105 - SYS_DELETE_MODULE = 106 - SYS_TIMER_CREATE = 107 - SYS_TIMER_GETTIME = 108 - SYS_TIMER_GETOVERRUN = 109 - SYS_TIMER_SETTIME = 110 - SYS_TIMER_DELETE = 111 - SYS_CLOCK_SETTIME = 112 - SYS_CLOCK_GETTIME = 113 - SYS_CLOCK_GETRES = 114 - SYS_CLOCK_NANOSLEEP = 115 - SYS_SYSLOG = 116 - SYS_PTRACE = 117 - SYS_SCHED_SETPARAM = 118 - SYS_SCHED_SETSCHEDULER = 119 - SYS_SCHED_GETSCHEDULER = 120 - SYS_SCHED_GETPARAM = 121 - SYS_SCHED_SETAFFINITY = 122 - SYS_SCHED_GETAFFINITY = 123 - SYS_SCHED_YIELD = 124 - SYS_SCHED_GET_PRIORITY_MAX = 125 - SYS_SCHED_GET_PRIORITY_MIN = 126 - SYS_SCHED_RR_GET_INTERVAL = 127 - SYS_RESTART_SYSCALL = 128 - SYS_KILL = 129 - SYS_TKILL = 130 - SYS_TGKILL = 131 - SYS_SIGALTSTACK = 132 - SYS_RT_SIGSUSPEND = 133 - SYS_RT_SIGACTION = 134 - SYS_RT_SIGPROCMASK = 135 - SYS_RT_SIGPENDING = 136 - SYS_RT_SIGTIMEDWAIT = 137 - SYS_RT_SIGQUEUEINFO = 138 - SYS_RT_SIGRETURN = 139 - SYS_SETPRIORITY = 140 - SYS_GETPRIORITY = 141 - SYS_REBOOT = 142 - SYS_SETREGID = 143 - SYS_SETGID = 144 - SYS_SETREUID = 145 - SYS_SETUID = 146 - SYS_SETRESUID = 147 - SYS_GETRESUID = 148 - SYS_SETRESGID = 149 - SYS_GETRESGID = 150 - SYS_SETFSUID = 151 - SYS_SETFSGID = 152 - SYS_TIMES = 153 - SYS_SETPGID = 154 - SYS_GETPGID = 155 - SYS_GETSID = 156 - SYS_SETSID = 157 - SYS_GETGROUPS = 158 - SYS_SETGROUPS = 159 - SYS_UNAME = 160 - SYS_SETHOSTNAME = 161 - SYS_SETDOMAINNAME = 162 - 
SYS_GETRLIMIT = 163 - SYS_SETRLIMIT = 164 - SYS_GETRUSAGE = 165 - SYS_UMASK = 166 - SYS_PRCTL = 167 - SYS_GETCPU = 168 - SYS_GETTIMEOFDAY = 169 - SYS_SETTIMEOFDAY = 170 - SYS_ADJTIMEX = 171 - SYS_GETPID = 172 - SYS_GETPPID = 173 - SYS_GETUID = 174 - SYS_GETEUID = 175 - SYS_GETGID = 176 - SYS_GETEGID = 177 - SYS_GETTID = 178 - SYS_SYSINFO = 179 - SYS_MQ_OPEN = 180 - SYS_MQ_UNLINK = 181 - SYS_MQ_TIMEDSEND = 182 - SYS_MQ_TIMEDRECEIVE = 183 - SYS_MQ_NOTIFY = 184 - SYS_MQ_GETSETATTR = 185 - SYS_MSGGET = 186 - SYS_MSGCTL = 187 - SYS_MSGRCV = 188 - SYS_MSGSND = 189 - SYS_SEMGET = 190 - SYS_SEMCTL = 191 - SYS_SEMTIMEDOP = 192 - SYS_SEMOP = 193 - SYS_SHMGET = 194 - SYS_SHMCTL = 195 - SYS_SHMAT = 196 - SYS_SHMDT = 197 - SYS_SOCKET = 198 - SYS_SOCKETPAIR = 199 - SYS_BIND = 200 - SYS_LISTEN = 201 - SYS_ACCEPT = 202 - SYS_CONNECT = 203 - SYS_GETSOCKNAME = 204 - SYS_GETPEERNAME = 205 - SYS_SENDTO = 206 - SYS_RECVFROM = 207 - SYS_SETSOCKOPT = 208 - SYS_GETSOCKOPT = 209 - SYS_SHUTDOWN = 210 - SYS_SENDMSG = 211 - SYS_RECVMSG = 212 - SYS_READAHEAD = 213 - SYS_BRK = 214 - SYS_MUNMAP = 215 - SYS_MREMAP = 216 - SYS_ADD_KEY = 217 - SYS_REQUEST_KEY = 218 - SYS_KEYCTL = 219 - SYS_CLONE = 220 - SYS_EXECVE = 221 - SYS_MMAP = 222 - SYS_FADVISE64 = 223 - SYS_SWAPON = 224 - SYS_SWAPOFF = 225 - SYS_MPROTECT = 226 - SYS_MSYNC = 227 - SYS_MLOCK = 228 - SYS_MUNLOCK = 229 - SYS_MLOCKALL = 230 - SYS_MUNLOCKALL = 231 - SYS_MINCORE = 232 - SYS_MADVISE = 233 - SYS_REMAP_FILE_PAGES = 234 - SYS_MBIND = 235 - SYS_GET_MEMPOLICY = 236 - SYS_SET_MEMPOLICY = 237 - SYS_MIGRATE_PAGES = 238 - SYS_MOVE_PAGES = 239 - SYS_RT_TGSIGQUEUEINFO = 240 - SYS_PERF_EVENT_OPEN = 241 - SYS_ACCEPT4 = 242 - SYS_RECVMMSG = 243 - SYS_ARCH_SPECIFIC_SYSCALL = 244 - SYS_WAIT4 = 260 - SYS_PRLIMIT64 = 261 - SYS_FANOTIFY_INIT = 262 - SYS_FANOTIFY_MARK = 263 - SYS_NAME_TO_HANDLE_AT = 264 - SYS_OPEN_BY_HANDLE_AT = 265 - SYS_CLOCK_ADJTIME = 266 - SYS_SYNCFS = 267 - SYS_SETNS = 268 - SYS_SENDMMSG = 269 - SYS_PROCESS_VM_READV = 270 - 
SYS_PROCESS_VM_WRITEV = 271 - SYS_KCMP = 272 - SYS_FINIT_MODULE = 273 - SYS_SCHED_SETATTR = 274 - SYS_SCHED_GETATTR = 275 - SYS_RENAMEAT2 = 276 - SYS_SECCOMP = 277 - SYS_GETRANDOM = 278 - SYS_MEMFD_CREATE = 279 - SYS_BPF = 280 - SYS_EXECVEAT = 281 - SYS_USERFAULTFD = 282 - SYS_MEMBARRIER = 283 - SYS_MLOCK2 = 284 - SYS_COPY_FILE_RANGE = 285 - SYS_PREADV2 = 286 - SYS_PWRITEV2 = 287 - SYS_PKEY_MPROTECT = 288 - SYS_PKEY_ALLOC = 289 - SYS_PKEY_FREE = 290 - SYS_STATX = 291 - SYS_IO_PGETEVENTS = 292 - SYS_RSEQ = 293 - SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_RENAMEAT = 38 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + 
SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + 
SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + 
SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index c877ec6..773640b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -424,4 +424,7 @@ const ( SYS_PROCESS_MADVISE = 4440 SYS_EPOLL_PWAIT2 = 4441 SYS_MOUNT_SETATTR 
= 4442 + SYS_LANDLOCK_CREATE_RULESET = 4444 + SYS_LANDLOCK_ADD_RULE = 4445 + SYS_LANDLOCK_RESTRICT_SELF = 4446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index b5f2903..86a41e5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -7,351 +7,354 @@ package unix const ( - SYS_READ = 5000 - SYS_WRITE = 5001 - SYS_OPEN = 5002 - SYS_CLOSE = 5003 - SYS_STAT = 5004 - SYS_FSTAT = 5005 - SYS_LSTAT = 5006 - SYS_POLL = 5007 - SYS_LSEEK = 5008 - SYS_MMAP = 5009 - SYS_MPROTECT = 5010 - SYS_MUNMAP = 5011 - SYS_BRK = 5012 - SYS_RT_SIGACTION = 5013 - SYS_RT_SIGPROCMASK = 5014 - SYS_IOCTL = 5015 - SYS_PREAD64 = 5016 - SYS_PWRITE64 = 5017 - SYS_READV = 5018 - SYS_WRITEV = 5019 - SYS_ACCESS = 5020 - SYS_PIPE = 5021 - SYS__NEWSELECT = 5022 - SYS_SCHED_YIELD = 5023 - SYS_MREMAP = 5024 - SYS_MSYNC = 5025 - SYS_MINCORE = 5026 - SYS_MADVISE = 5027 - SYS_SHMGET = 5028 - SYS_SHMAT = 5029 - SYS_SHMCTL = 5030 - SYS_DUP = 5031 - SYS_DUP2 = 5032 - SYS_PAUSE = 5033 - SYS_NANOSLEEP = 5034 - SYS_GETITIMER = 5035 - SYS_SETITIMER = 5036 - SYS_ALARM = 5037 - SYS_GETPID = 5038 - SYS_SENDFILE = 5039 - SYS_SOCKET = 5040 - SYS_CONNECT = 5041 - SYS_ACCEPT = 5042 - SYS_SENDTO = 5043 - SYS_RECVFROM = 5044 - SYS_SENDMSG = 5045 - SYS_RECVMSG = 5046 - SYS_SHUTDOWN = 5047 - SYS_BIND = 5048 - SYS_LISTEN = 5049 - SYS_GETSOCKNAME = 5050 - SYS_GETPEERNAME = 5051 - SYS_SOCKETPAIR = 5052 - SYS_SETSOCKOPT = 5053 - SYS_GETSOCKOPT = 5054 - SYS_CLONE = 5055 - SYS_FORK = 5056 - SYS_EXECVE = 5057 - SYS_EXIT = 5058 - SYS_WAIT4 = 5059 - SYS_KILL = 5060 - SYS_UNAME = 5061 - SYS_SEMGET = 5062 - SYS_SEMOP = 5063 - SYS_SEMCTL = 5064 - SYS_SHMDT = 5065 - SYS_MSGGET = 5066 - SYS_MSGSND = 5067 - SYS_MSGRCV = 5068 - SYS_MSGCTL = 5069 - SYS_FCNTL = 5070 - SYS_FLOCK = 5071 - SYS_FSYNC = 5072 - SYS_FDATASYNC = 5073 - SYS_TRUNCATE = 5074 - SYS_FTRUNCATE = 5075 - SYS_GETDENTS = 5076 - 
SYS_GETCWD = 5077 - SYS_CHDIR = 5078 - SYS_FCHDIR = 5079 - SYS_RENAME = 5080 - SYS_MKDIR = 5081 - SYS_RMDIR = 5082 - SYS_CREAT = 5083 - SYS_LINK = 5084 - SYS_UNLINK = 5085 - SYS_SYMLINK = 5086 - SYS_READLINK = 5087 - SYS_CHMOD = 5088 - SYS_FCHMOD = 5089 - SYS_CHOWN = 5090 - SYS_FCHOWN = 5091 - SYS_LCHOWN = 5092 - SYS_UMASK = 5093 - SYS_GETTIMEOFDAY = 5094 - SYS_GETRLIMIT = 5095 - SYS_GETRUSAGE = 5096 - SYS_SYSINFO = 5097 - SYS_TIMES = 5098 - SYS_PTRACE = 5099 - SYS_GETUID = 5100 - SYS_SYSLOG = 5101 - SYS_GETGID = 5102 - SYS_SETUID = 5103 - SYS_SETGID = 5104 - SYS_GETEUID = 5105 - SYS_GETEGID = 5106 - SYS_SETPGID = 5107 - SYS_GETPPID = 5108 - SYS_GETPGRP = 5109 - SYS_SETSID = 5110 - SYS_SETREUID = 5111 - SYS_SETREGID = 5112 - SYS_GETGROUPS = 5113 - SYS_SETGROUPS = 5114 - SYS_SETRESUID = 5115 - SYS_GETRESUID = 5116 - SYS_SETRESGID = 5117 - SYS_GETRESGID = 5118 - SYS_GETPGID = 5119 - SYS_SETFSUID = 5120 - SYS_SETFSGID = 5121 - SYS_GETSID = 5122 - SYS_CAPGET = 5123 - SYS_CAPSET = 5124 - SYS_RT_SIGPENDING = 5125 - SYS_RT_SIGTIMEDWAIT = 5126 - SYS_RT_SIGQUEUEINFO = 5127 - SYS_RT_SIGSUSPEND = 5128 - SYS_SIGALTSTACK = 5129 - SYS_UTIME = 5130 - SYS_MKNOD = 5131 - SYS_PERSONALITY = 5132 - SYS_USTAT = 5133 - SYS_STATFS = 5134 - SYS_FSTATFS = 5135 - SYS_SYSFS = 5136 - SYS_GETPRIORITY = 5137 - SYS_SETPRIORITY = 5138 - SYS_SCHED_SETPARAM = 5139 - SYS_SCHED_GETPARAM = 5140 - SYS_SCHED_SETSCHEDULER = 5141 - SYS_SCHED_GETSCHEDULER = 5142 - SYS_SCHED_GET_PRIORITY_MAX = 5143 - SYS_SCHED_GET_PRIORITY_MIN = 5144 - SYS_SCHED_RR_GET_INTERVAL = 5145 - SYS_MLOCK = 5146 - SYS_MUNLOCK = 5147 - SYS_MLOCKALL = 5148 - SYS_MUNLOCKALL = 5149 - SYS_VHANGUP = 5150 - SYS_PIVOT_ROOT = 5151 - SYS__SYSCTL = 5152 - SYS_PRCTL = 5153 - SYS_ADJTIMEX = 5154 - SYS_SETRLIMIT = 5155 - SYS_CHROOT = 5156 - SYS_SYNC = 5157 - SYS_ACCT = 5158 - SYS_SETTIMEOFDAY = 5159 - SYS_MOUNT = 5160 - SYS_UMOUNT2 = 5161 - SYS_SWAPON = 5162 - SYS_SWAPOFF = 5163 - SYS_REBOOT = 5164 - SYS_SETHOSTNAME = 5165 - SYS_SETDOMAINNAME = 
5166 - SYS_CREATE_MODULE = 5167 - SYS_INIT_MODULE = 5168 - SYS_DELETE_MODULE = 5169 - SYS_GET_KERNEL_SYMS = 5170 - SYS_QUERY_MODULE = 5171 - SYS_QUOTACTL = 5172 - SYS_NFSSERVCTL = 5173 - SYS_GETPMSG = 5174 - SYS_PUTPMSG = 5175 - SYS_AFS_SYSCALL = 5176 - SYS_RESERVED177 = 5177 - SYS_GETTID = 5178 - SYS_READAHEAD = 5179 - SYS_SETXATTR = 5180 - SYS_LSETXATTR = 5181 - SYS_FSETXATTR = 5182 - SYS_GETXATTR = 5183 - SYS_LGETXATTR = 5184 - SYS_FGETXATTR = 5185 - SYS_LISTXATTR = 5186 - SYS_LLISTXATTR = 5187 - SYS_FLISTXATTR = 5188 - SYS_REMOVEXATTR = 5189 - SYS_LREMOVEXATTR = 5190 - SYS_FREMOVEXATTR = 5191 - SYS_TKILL = 5192 - SYS_RESERVED193 = 5193 - SYS_FUTEX = 5194 - SYS_SCHED_SETAFFINITY = 5195 - SYS_SCHED_GETAFFINITY = 5196 - SYS_CACHEFLUSH = 5197 - SYS_CACHECTL = 5198 - SYS_SYSMIPS = 5199 - SYS_IO_SETUP = 5200 - SYS_IO_DESTROY = 5201 - SYS_IO_GETEVENTS = 5202 - SYS_IO_SUBMIT = 5203 - SYS_IO_CANCEL = 5204 - SYS_EXIT_GROUP = 5205 - SYS_LOOKUP_DCOOKIE = 5206 - SYS_EPOLL_CREATE = 5207 - SYS_EPOLL_CTL = 5208 - SYS_EPOLL_WAIT = 5209 - SYS_REMAP_FILE_PAGES = 5210 - SYS_RT_SIGRETURN = 5211 - SYS_SET_TID_ADDRESS = 5212 - SYS_RESTART_SYSCALL = 5213 - SYS_SEMTIMEDOP = 5214 - SYS_FADVISE64 = 5215 - SYS_TIMER_CREATE = 5216 - SYS_TIMER_SETTIME = 5217 - SYS_TIMER_GETTIME = 5218 - SYS_TIMER_GETOVERRUN = 5219 - SYS_TIMER_DELETE = 5220 - SYS_CLOCK_SETTIME = 5221 - SYS_CLOCK_GETTIME = 5222 - SYS_CLOCK_GETRES = 5223 - SYS_CLOCK_NANOSLEEP = 5224 - SYS_TGKILL = 5225 - SYS_UTIMES = 5226 - SYS_MBIND = 5227 - SYS_GET_MEMPOLICY = 5228 - SYS_SET_MEMPOLICY = 5229 - SYS_MQ_OPEN = 5230 - SYS_MQ_UNLINK = 5231 - SYS_MQ_TIMEDSEND = 5232 - SYS_MQ_TIMEDRECEIVE = 5233 - SYS_MQ_NOTIFY = 5234 - SYS_MQ_GETSETATTR = 5235 - SYS_VSERVER = 5236 - SYS_WAITID = 5237 - SYS_ADD_KEY = 5239 - SYS_REQUEST_KEY = 5240 - SYS_KEYCTL = 5241 - SYS_SET_THREAD_AREA = 5242 - SYS_INOTIFY_INIT = 5243 - SYS_INOTIFY_ADD_WATCH = 5244 - SYS_INOTIFY_RM_WATCH = 5245 - SYS_MIGRATE_PAGES = 5246 - SYS_OPENAT = 5247 - SYS_MKDIRAT = 5248 - 
SYS_MKNODAT = 5249 - SYS_FCHOWNAT = 5250 - SYS_FUTIMESAT = 5251 - SYS_NEWFSTATAT = 5252 - SYS_UNLINKAT = 5253 - SYS_RENAMEAT = 5254 - SYS_LINKAT = 5255 - SYS_SYMLINKAT = 5256 - SYS_READLINKAT = 5257 - SYS_FCHMODAT = 5258 - SYS_FACCESSAT = 5259 - SYS_PSELECT6 = 5260 - SYS_PPOLL = 5261 - SYS_UNSHARE = 5262 - SYS_SPLICE = 5263 - SYS_SYNC_FILE_RANGE = 5264 - SYS_TEE = 5265 - SYS_VMSPLICE = 5266 - SYS_MOVE_PAGES = 5267 - SYS_SET_ROBUST_LIST = 5268 - SYS_GET_ROBUST_LIST = 5269 - SYS_KEXEC_LOAD = 5270 - SYS_GETCPU = 5271 - SYS_EPOLL_PWAIT = 5272 - SYS_IOPRIO_SET = 5273 - SYS_IOPRIO_GET = 5274 - SYS_UTIMENSAT = 5275 - SYS_SIGNALFD = 5276 - SYS_TIMERFD = 5277 - SYS_EVENTFD = 5278 - SYS_FALLOCATE = 5279 - SYS_TIMERFD_CREATE = 5280 - SYS_TIMERFD_GETTIME = 5281 - SYS_TIMERFD_SETTIME = 5282 - SYS_SIGNALFD4 = 5283 - SYS_EVENTFD2 = 5284 - SYS_EPOLL_CREATE1 = 5285 - SYS_DUP3 = 5286 - SYS_PIPE2 = 5287 - SYS_INOTIFY_INIT1 = 5288 - SYS_PREADV = 5289 - SYS_PWRITEV = 5290 - SYS_RT_TGSIGQUEUEINFO = 5291 - SYS_PERF_EVENT_OPEN = 5292 - SYS_ACCEPT4 = 5293 - SYS_RECVMMSG = 5294 - SYS_FANOTIFY_INIT = 5295 - SYS_FANOTIFY_MARK = 5296 - SYS_PRLIMIT64 = 5297 - SYS_NAME_TO_HANDLE_AT = 5298 - SYS_OPEN_BY_HANDLE_AT = 5299 - SYS_CLOCK_ADJTIME = 5300 - SYS_SYNCFS = 5301 - SYS_SENDMMSG = 5302 - SYS_SETNS = 5303 - SYS_PROCESS_VM_READV = 5304 - SYS_PROCESS_VM_WRITEV = 5305 - SYS_KCMP = 5306 - SYS_FINIT_MODULE = 5307 - SYS_GETDENTS64 = 5308 - SYS_SCHED_SETATTR = 5309 - SYS_SCHED_GETATTR = 5310 - SYS_RENAMEAT2 = 5311 - SYS_SECCOMP = 5312 - SYS_GETRANDOM = 5313 - SYS_MEMFD_CREATE = 5314 - SYS_BPF = 5315 - SYS_EXECVEAT = 5316 - SYS_USERFAULTFD = 5317 - SYS_MEMBARRIER = 5318 - SYS_MLOCK2 = 5319 - SYS_COPY_FILE_RANGE = 5320 - SYS_PREADV2 = 5321 - SYS_PWRITEV2 = 5322 - SYS_PKEY_MPROTECT = 5323 - SYS_PKEY_ALLOC = 5324 - SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 - SYS_RSEQ = 5327 - SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - 
SYS_IO_URING_REGISTER = 5427 - SYS_OPEN_TREE = 5428 - SYS_MOVE_MOUNT = 5429 - SYS_FSOPEN = 5430 - SYS_FSCONFIG = 5431 - SYS_FSMOUNT = 5432 - SYS_FSPICK = 5433 - SYS_PIDFD_OPEN = 5434 - SYS_CLONE3 = 5435 - SYS_CLOSE_RANGE = 5436 - SYS_OPENAT2 = 5437 - SYS_PIDFD_GETFD = 5438 - SYS_FACCESSAT2 = 5439 - SYS_PROCESS_MADVISE = 5440 - SYS_EPOLL_PWAIT2 = 5441 - SYS_MOUNT_SETATTR = 5442 + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + 
SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + 
SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 + SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT 
= 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + 
SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 + SYS_LANDLOCK_CREATE_RULESET = 5444 + SYS_LANDLOCK_ADD_RULE = 5445 + SYS_LANDLOCK_RESTRICT_SELF = 5446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 4607768..77f5728 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -7,351 +7,354 @@ package unix const ( - SYS_READ = 5000 - SYS_WRITE = 5001 - SYS_OPEN = 5002 - SYS_CLOSE = 5003 - SYS_STAT = 5004 - SYS_FSTAT = 5005 - SYS_LSTAT = 5006 - SYS_POLL = 5007 - SYS_LSEEK = 5008 - SYS_MMAP = 5009 - SYS_MPROTECT = 5010 - SYS_MUNMAP = 5011 - SYS_BRK = 5012 - SYS_RT_SIGACTION = 5013 - SYS_RT_SIGPROCMASK = 5014 - SYS_IOCTL = 5015 - SYS_PREAD64 = 5016 - SYS_PWRITE64 = 5017 - SYS_READV = 5018 - SYS_WRITEV = 5019 - SYS_ACCESS = 5020 - SYS_PIPE = 5021 - SYS__NEWSELECT = 5022 - SYS_SCHED_YIELD = 5023 - SYS_MREMAP = 5024 - SYS_MSYNC = 5025 - SYS_MINCORE = 5026 - SYS_MADVISE = 5027 - SYS_SHMGET = 5028 - SYS_SHMAT = 5029 - SYS_SHMCTL = 5030 - SYS_DUP = 5031 - SYS_DUP2 = 5032 - SYS_PAUSE = 5033 - SYS_NANOSLEEP = 5034 - SYS_GETITIMER = 5035 - SYS_SETITIMER = 5036 - SYS_ALARM = 5037 - SYS_GETPID = 5038 - SYS_SENDFILE = 5039 - SYS_SOCKET = 5040 - SYS_CONNECT = 5041 - SYS_ACCEPT = 5042 - SYS_SENDTO = 5043 - SYS_RECVFROM = 5044 - SYS_SENDMSG = 5045 - SYS_RECVMSG = 5046 - SYS_SHUTDOWN = 5047 - SYS_BIND = 5048 - SYS_LISTEN = 5049 - SYS_GETSOCKNAME = 5050 - SYS_GETPEERNAME = 5051 - SYS_SOCKETPAIR = 5052 - SYS_SETSOCKOPT = 5053 - SYS_GETSOCKOPT = 5054 - SYS_CLONE = 5055 - SYS_FORK = 5056 - SYS_EXECVE = 5057 - SYS_EXIT = 5058 - SYS_WAIT4 = 5059 - SYS_KILL = 5060 
- SYS_UNAME = 5061 - SYS_SEMGET = 5062 - SYS_SEMOP = 5063 - SYS_SEMCTL = 5064 - SYS_SHMDT = 5065 - SYS_MSGGET = 5066 - SYS_MSGSND = 5067 - SYS_MSGRCV = 5068 - SYS_MSGCTL = 5069 - SYS_FCNTL = 5070 - SYS_FLOCK = 5071 - SYS_FSYNC = 5072 - SYS_FDATASYNC = 5073 - SYS_TRUNCATE = 5074 - SYS_FTRUNCATE = 5075 - SYS_GETDENTS = 5076 - SYS_GETCWD = 5077 - SYS_CHDIR = 5078 - SYS_FCHDIR = 5079 - SYS_RENAME = 5080 - SYS_MKDIR = 5081 - SYS_RMDIR = 5082 - SYS_CREAT = 5083 - SYS_LINK = 5084 - SYS_UNLINK = 5085 - SYS_SYMLINK = 5086 - SYS_READLINK = 5087 - SYS_CHMOD = 5088 - SYS_FCHMOD = 5089 - SYS_CHOWN = 5090 - SYS_FCHOWN = 5091 - SYS_LCHOWN = 5092 - SYS_UMASK = 5093 - SYS_GETTIMEOFDAY = 5094 - SYS_GETRLIMIT = 5095 - SYS_GETRUSAGE = 5096 - SYS_SYSINFO = 5097 - SYS_TIMES = 5098 - SYS_PTRACE = 5099 - SYS_GETUID = 5100 - SYS_SYSLOG = 5101 - SYS_GETGID = 5102 - SYS_SETUID = 5103 - SYS_SETGID = 5104 - SYS_GETEUID = 5105 - SYS_GETEGID = 5106 - SYS_SETPGID = 5107 - SYS_GETPPID = 5108 - SYS_GETPGRP = 5109 - SYS_SETSID = 5110 - SYS_SETREUID = 5111 - SYS_SETREGID = 5112 - SYS_GETGROUPS = 5113 - SYS_SETGROUPS = 5114 - SYS_SETRESUID = 5115 - SYS_GETRESUID = 5116 - SYS_SETRESGID = 5117 - SYS_GETRESGID = 5118 - SYS_GETPGID = 5119 - SYS_SETFSUID = 5120 - SYS_SETFSGID = 5121 - SYS_GETSID = 5122 - SYS_CAPGET = 5123 - SYS_CAPSET = 5124 - SYS_RT_SIGPENDING = 5125 - SYS_RT_SIGTIMEDWAIT = 5126 - SYS_RT_SIGQUEUEINFO = 5127 - SYS_RT_SIGSUSPEND = 5128 - SYS_SIGALTSTACK = 5129 - SYS_UTIME = 5130 - SYS_MKNOD = 5131 - SYS_PERSONALITY = 5132 - SYS_USTAT = 5133 - SYS_STATFS = 5134 - SYS_FSTATFS = 5135 - SYS_SYSFS = 5136 - SYS_GETPRIORITY = 5137 - SYS_SETPRIORITY = 5138 - SYS_SCHED_SETPARAM = 5139 - SYS_SCHED_GETPARAM = 5140 - SYS_SCHED_SETSCHEDULER = 5141 - SYS_SCHED_GETSCHEDULER = 5142 - SYS_SCHED_GET_PRIORITY_MAX = 5143 - SYS_SCHED_GET_PRIORITY_MIN = 5144 - SYS_SCHED_RR_GET_INTERVAL = 5145 - SYS_MLOCK = 5146 - SYS_MUNLOCK = 5147 - SYS_MLOCKALL = 5148 - SYS_MUNLOCKALL = 5149 - SYS_VHANGUP = 5150 - 
SYS_PIVOT_ROOT = 5151 - SYS__SYSCTL = 5152 - SYS_PRCTL = 5153 - SYS_ADJTIMEX = 5154 - SYS_SETRLIMIT = 5155 - SYS_CHROOT = 5156 - SYS_SYNC = 5157 - SYS_ACCT = 5158 - SYS_SETTIMEOFDAY = 5159 - SYS_MOUNT = 5160 - SYS_UMOUNT2 = 5161 - SYS_SWAPON = 5162 - SYS_SWAPOFF = 5163 - SYS_REBOOT = 5164 - SYS_SETHOSTNAME = 5165 - SYS_SETDOMAINNAME = 5166 - SYS_CREATE_MODULE = 5167 - SYS_INIT_MODULE = 5168 - SYS_DELETE_MODULE = 5169 - SYS_GET_KERNEL_SYMS = 5170 - SYS_QUERY_MODULE = 5171 - SYS_QUOTACTL = 5172 - SYS_NFSSERVCTL = 5173 - SYS_GETPMSG = 5174 - SYS_PUTPMSG = 5175 - SYS_AFS_SYSCALL = 5176 - SYS_RESERVED177 = 5177 - SYS_GETTID = 5178 - SYS_READAHEAD = 5179 - SYS_SETXATTR = 5180 - SYS_LSETXATTR = 5181 - SYS_FSETXATTR = 5182 - SYS_GETXATTR = 5183 - SYS_LGETXATTR = 5184 - SYS_FGETXATTR = 5185 - SYS_LISTXATTR = 5186 - SYS_LLISTXATTR = 5187 - SYS_FLISTXATTR = 5188 - SYS_REMOVEXATTR = 5189 - SYS_LREMOVEXATTR = 5190 - SYS_FREMOVEXATTR = 5191 - SYS_TKILL = 5192 - SYS_RESERVED193 = 5193 - SYS_FUTEX = 5194 - SYS_SCHED_SETAFFINITY = 5195 - SYS_SCHED_GETAFFINITY = 5196 - SYS_CACHEFLUSH = 5197 - SYS_CACHECTL = 5198 - SYS_SYSMIPS = 5199 - SYS_IO_SETUP = 5200 - SYS_IO_DESTROY = 5201 - SYS_IO_GETEVENTS = 5202 - SYS_IO_SUBMIT = 5203 - SYS_IO_CANCEL = 5204 - SYS_EXIT_GROUP = 5205 - SYS_LOOKUP_DCOOKIE = 5206 - SYS_EPOLL_CREATE = 5207 - SYS_EPOLL_CTL = 5208 - SYS_EPOLL_WAIT = 5209 - SYS_REMAP_FILE_PAGES = 5210 - SYS_RT_SIGRETURN = 5211 - SYS_SET_TID_ADDRESS = 5212 - SYS_RESTART_SYSCALL = 5213 - SYS_SEMTIMEDOP = 5214 - SYS_FADVISE64 = 5215 - SYS_TIMER_CREATE = 5216 - SYS_TIMER_SETTIME = 5217 - SYS_TIMER_GETTIME = 5218 - SYS_TIMER_GETOVERRUN = 5219 - SYS_TIMER_DELETE = 5220 - SYS_CLOCK_SETTIME = 5221 - SYS_CLOCK_GETTIME = 5222 - SYS_CLOCK_GETRES = 5223 - SYS_CLOCK_NANOSLEEP = 5224 - SYS_TGKILL = 5225 - SYS_UTIMES = 5226 - SYS_MBIND = 5227 - SYS_GET_MEMPOLICY = 5228 - SYS_SET_MEMPOLICY = 5229 - SYS_MQ_OPEN = 5230 - SYS_MQ_UNLINK = 5231 - SYS_MQ_TIMEDSEND = 5232 - SYS_MQ_TIMEDRECEIVE = 5233 - 
SYS_MQ_NOTIFY = 5234 - SYS_MQ_GETSETATTR = 5235 - SYS_VSERVER = 5236 - SYS_WAITID = 5237 - SYS_ADD_KEY = 5239 - SYS_REQUEST_KEY = 5240 - SYS_KEYCTL = 5241 - SYS_SET_THREAD_AREA = 5242 - SYS_INOTIFY_INIT = 5243 - SYS_INOTIFY_ADD_WATCH = 5244 - SYS_INOTIFY_RM_WATCH = 5245 - SYS_MIGRATE_PAGES = 5246 - SYS_OPENAT = 5247 - SYS_MKDIRAT = 5248 - SYS_MKNODAT = 5249 - SYS_FCHOWNAT = 5250 - SYS_FUTIMESAT = 5251 - SYS_NEWFSTATAT = 5252 - SYS_UNLINKAT = 5253 - SYS_RENAMEAT = 5254 - SYS_LINKAT = 5255 - SYS_SYMLINKAT = 5256 - SYS_READLINKAT = 5257 - SYS_FCHMODAT = 5258 - SYS_FACCESSAT = 5259 - SYS_PSELECT6 = 5260 - SYS_PPOLL = 5261 - SYS_UNSHARE = 5262 - SYS_SPLICE = 5263 - SYS_SYNC_FILE_RANGE = 5264 - SYS_TEE = 5265 - SYS_VMSPLICE = 5266 - SYS_MOVE_PAGES = 5267 - SYS_SET_ROBUST_LIST = 5268 - SYS_GET_ROBUST_LIST = 5269 - SYS_KEXEC_LOAD = 5270 - SYS_GETCPU = 5271 - SYS_EPOLL_PWAIT = 5272 - SYS_IOPRIO_SET = 5273 - SYS_IOPRIO_GET = 5274 - SYS_UTIMENSAT = 5275 - SYS_SIGNALFD = 5276 - SYS_TIMERFD = 5277 - SYS_EVENTFD = 5278 - SYS_FALLOCATE = 5279 - SYS_TIMERFD_CREATE = 5280 - SYS_TIMERFD_GETTIME = 5281 - SYS_TIMERFD_SETTIME = 5282 - SYS_SIGNALFD4 = 5283 - SYS_EVENTFD2 = 5284 - SYS_EPOLL_CREATE1 = 5285 - SYS_DUP3 = 5286 - SYS_PIPE2 = 5287 - SYS_INOTIFY_INIT1 = 5288 - SYS_PREADV = 5289 - SYS_PWRITEV = 5290 - SYS_RT_TGSIGQUEUEINFO = 5291 - SYS_PERF_EVENT_OPEN = 5292 - SYS_ACCEPT4 = 5293 - SYS_RECVMMSG = 5294 - SYS_FANOTIFY_INIT = 5295 - SYS_FANOTIFY_MARK = 5296 - SYS_PRLIMIT64 = 5297 - SYS_NAME_TO_HANDLE_AT = 5298 - SYS_OPEN_BY_HANDLE_AT = 5299 - SYS_CLOCK_ADJTIME = 5300 - SYS_SYNCFS = 5301 - SYS_SENDMMSG = 5302 - SYS_SETNS = 5303 - SYS_PROCESS_VM_READV = 5304 - SYS_PROCESS_VM_WRITEV = 5305 - SYS_KCMP = 5306 - SYS_FINIT_MODULE = 5307 - SYS_GETDENTS64 = 5308 - SYS_SCHED_SETATTR = 5309 - SYS_SCHED_GETATTR = 5310 - SYS_RENAMEAT2 = 5311 - SYS_SECCOMP = 5312 - SYS_GETRANDOM = 5313 - SYS_MEMFD_CREATE = 5314 - SYS_BPF = 5315 - SYS_EXECVEAT = 5316 - SYS_USERFAULTFD = 5317 - SYS_MEMBARRIER = 
5318 - SYS_MLOCK2 = 5319 - SYS_COPY_FILE_RANGE = 5320 - SYS_PREADV2 = 5321 - SYS_PWRITEV2 = 5322 - SYS_PKEY_MPROTECT = 5323 - SYS_PKEY_ALLOC = 5324 - SYS_PKEY_FREE = 5325 - SYS_STATX = 5326 - SYS_RSEQ = 5327 - SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 - SYS_OPEN_TREE = 5428 - SYS_MOVE_MOUNT = 5429 - SYS_FSOPEN = 5430 - SYS_FSCONFIG = 5431 - SYS_FSMOUNT = 5432 - SYS_FSPICK = 5433 - SYS_PIDFD_OPEN = 5434 - SYS_CLONE3 = 5435 - SYS_CLOSE_RANGE = 5436 - SYS_OPENAT2 = 5437 - SYS_PIDFD_GETFD = 5438 - SYS_FACCESSAT2 = 5439 - SYS_PROCESS_MADVISE = 5440 - SYS_EPOLL_PWAIT2 = 5441 - SYS_MOUNT_SETATTR = 5442 + SYS_READ = 5000 + SYS_WRITE = 5001 + SYS_OPEN = 5002 + SYS_CLOSE = 5003 + SYS_STAT = 5004 + SYS_FSTAT = 5005 + SYS_LSTAT = 5006 + SYS_POLL = 5007 + SYS_LSEEK = 5008 + SYS_MMAP = 5009 + SYS_MPROTECT = 5010 + SYS_MUNMAP = 5011 + SYS_BRK = 5012 + SYS_RT_SIGACTION = 5013 + SYS_RT_SIGPROCMASK = 5014 + SYS_IOCTL = 5015 + SYS_PREAD64 = 5016 + SYS_PWRITE64 = 5017 + SYS_READV = 5018 + SYS_WRITEV = 5019 + SYS_ACCESS = 5020 + SYS_PIPE = 5021 + SYS__NEWSELECT = 5022 + SYS_SCHED_YIELD = 5023 + SYS_MREMAP = 5024 + SYS_MSYNC = 5025 + SYS_MINCORE = 5026 + SYS_MADVISE = 5027 + SYS_SHMGET = 5028 + SYS_SHMAT = 5029 + SYS_SHMCTL = 5030 + SYS_DUP = 5031 + SYS_DUP2 = 5032 + SYS_PAUSE = 5033 + SYS_NANOSLEEP = 5034 + SYS_GETITIMER = 5035 + SYS_SETITIMER = 5036 + SYS_ALARM = 5037 + SYS_GETPID = 5038 + SYS_SENDFILE = 5039 + SYS_SOCKET = 5040 + SYS_CONNECT = 5041 + SYS_ACCEPT = 5042 + SYS_SENDTO = 5043 + SYS_RECVFROM = 5044 + SYS_SENDMSG = 5045 + SYS_RECVMSG = 5046 + SYS_SHUTDOWN = 5047 + SYS_BIND = 5048 + SYS_LISTEN = 5049 + SYS_GETSOCKNAME = 5050 + SYS_GETPEERNAME = 5051 + SYS_SOCKETPAIR = 5052 + SYS_SETSOCKOPT = 5053 + SYS_GETSOCKOPT = 5054 + SYS_CLONE = 5055 + SYS_FORK = 5056 + SYS_EXECVE = 5057 + SYS_EXIT = 5058 + SYS_WAIT4 = 5059 + SYS_KILL = 5060 + SYS_UNAME = 5061 + SYS_SEMGET = 5062 + 
SYS_SEMOP = 5063 + SYS_SEMCTL = 5064 + SYS_SHMDT = 5065 + SYS_MSGGET = 5066 + SYS_MSGSND = 5067 + SYS_MSGRCV = 5068 + SYS_MSGCTL = 5069 + SYS_FCNTL = 5070 + SYS_FLOCK = 5071 + SYS_FSYNC = 5072 + SYS_FDATASYNC = 5073 + SYS_TRUNCATE = 5074 + SYS_FTRUNCATE = 5075 + SYS_GETDENTS = 5076 + SYS_GETCWD = 5077 + SYS_CHDIR = 5078 + SYS_FCHDIR = 5079 + SYS_RENAME = 5080 + SYS_MKDIR = 5081 + SYS_RMDIR = 5082 + SYS_CREAT = 5083 + SYS_LINK = 5084 + SYS_UNLINK = 5085 + SYS_SYMLINK = 5086 + SYS_READLINK = 5087 + SYS_CHMOD = 5088 + SYS_FCHMOD = 5089 + SYS_CHOWN = 5090 + SYS_FCHOWN = 5091 + SYS_LCHOWN = 5092 + SYS_UMASK = 5093 + SYS_GETTIMEOFDAY = 5094 + SYS_GETRLIMIT = 5095 + SYS_GETRUSAGE = 5096 + SYS_SYSINFO = 5097 + SYS_TIMES = 5098 + SYS_PTRACE = 5099 + SYS_GETUID = 5100 + SYS_SYSLOG = 5101 + SYS_GETGID = 5102 + SYS_SETUID = 5103 + SYS_SETGID = 5104 + SYS_GETEUID = 5105 + SYS_GETEGID = 5106 + SYS_SETPGID = 5107 + SYS_GETPPID = 5108 + SYS_GETPGRP = 5109 + SYS_SETSID = 5110 + SYS_SETREUID = 5111 + SYS_SETREGID = 5112 + SYS_GETGROUPS = 5113 + SYS_SETGROUPS = 5114 + SYS_SETRESUID = 5115 + SYS_GETRESUID = 5116 + SYS_SETRESGID = 5117 + SYS_GETRESGID = 5118 + SYS_GETPGID = 5119 + SYS_SETFSUID = 5120 + SYS_SETFSGID = 5121 + SYS_GETSID = 5122 + SYS_CAPGET = 5123 + SYS_CAPSET = 5124 + SYS_RT_SIGPENDING = 5125 + SYS_RT_SIGTIMEDWAIT = 5126 + SYS_RT_SIGQUEUEINFO = 5127 + SYS_RT_SIGSUSPEND = 5128 + SYS_SIGALTSTACK = 5129 + SYS_UTIME = 5130 + SYS_MKNOD = 5131 + SYS_PERSONALITY = 5132 + SYS_USTAT = 5133 + SYS_STATFS = 5134 + SYS_FSTATFS = 5135 + SYS_SYSFS = 5136 + SYS_GETPRIORITY = 5137 + SYS_SETPRIORITY = 5138 + SYS_SCHED_SETPARAM = 5139 + SYS_SCHED_GETPARAM = 5140 + SYS_SCHED_SETSCHEDULER = 5141 + SYS_SCHED_GETSCHEDULER = 5142 + SYS_SCHED_GET_PRIORITY_MAX = 5143 + SYS_SCHED_GET_PRIORITY_MIN = 5144 + SYS_SCHED_RR_GET_INTERVAL = 5145 + SYS_MLOCK = 5146 + SYS_MUNLOCK = 5147 + SYS_MLOCKALL = 5148 + SYS_MUNLOCKALL = 5149 + SYS_VHANGUP = 5150 + SYS_PIVOT_ROOT = 5151 + SYS__SYSCTL = 5152 + 
SYS_PRCTL = 5153 + SYS_ADJTIMEX = 5154 + SYS_SETRLIMIT = 5155 + SYS_CHROOT = 5156 + SYS_SYNC = 5157 + SYS_ACCT = 5158 + SYS_SETTIMEOFDAY = 5159 + SYS_MOUNT = 5160 + SYS_UMOUNT2 = 5161 + SYS_SWAPON = 5162 + SYS_SWAPOFF = 5163 + SYS_REBOOT = 5164 + SYS_SETHOSTNAME = 5165 + SYS_SETDOMAINNAME = 5166 + SYS_CREATE_MODULE = 5167 + SYS_INIT_MODULE = 5168 + SYS_DELETE_MODULE = 5169 + SYS_GET_KERNEL_SYMS = 5170 + SYS_QUERY_MODULE = 5171 + SYS_QUOTACTL = 5172 + SYS_NFSSERVCTL = 5173 + SYS_GETPMSG = 5174 + SYS_PUTPMSG = 5175 + SYS_AFS_SYSCALL = 5176 + SYS_RESERVED177 = 5177 + SYS_GETTID = 5178 + SYS_READAHEAD = 5179 + SYS_SETXATTR = 5180 + SYS_LSETXATTR = 5181 + SYS_FSETXATTR = 5182 + SYS_GETXATTR = 5183 + SYS_LGETXATTR = 5184 + SYS_FGETXATTR = 5185 + SYS_LISTXATTR = 5186 + SYS_LLISTXATTR = 5187 + SYS_FLISTXATTR = 5188 + SYS_REMOVEXATTR = 5189 + SYS_LREMOVEXATTR = 5190 + SYS_FREMOVEXATTR = 5191 + SYS_TKILL = 5192 + SYS_RESERVED193 = 5193 + SYS_FUTEX = 5194 + SYS_SCHED_SETAFFINITY = 5195 + SYS_SCHED_GETAFFINITY = 5196 + SYS_CACHEFLUSH = 5197 + SYS_CACHECTL = 5198 + SYS_SYSMIPS = 5199 + SYS_IO_SETUP = 5200 + SYS_IO_DESTROY = 5201 + SYS_IO_GETEVENTS = 5202 + SYS_IO_SUBMIT = 5203 + SYS_IO_CANCEL = 5204 + SYS_EXIT_GROUP = 5205 + SYS_LOOKUP_DCOOKIE = 5206 + SYS_EPOLL_CREATE = 5207 + SYS_EPOLL_CTL = 5208 + SYS_EPOLL_WAIT = 5209 + SYS_REMAP_FILE_PAGES = 5210 + SYS_RT_SIGRETURN = 5211 + SYS_SET_TID_ADDRESS = 5212 + SYS_RESTART_SYSCALL = 5213 + SYS_SEMTIMEDOP = 5214 + SYS_FADVISE64 = 5215 + SYS_TIMER_CREATE = 5216 + SYS_TIMER_SETTIME = 5217 + SYS_TIMER_GETTIME = 5218 + SYS_TIMER_GETOVERRUN = 5219 + SYS_TIMER_DELETE = 5220 + SYS_CLOCK_SETTIME = 5221 + SYS_CLOCK_GETTIME = 5222 + SYS_CLOCK_GETRES = 5223 + SYS_CLOCK_NANOSLEEP = 5224 + SYS_TGKILL = 5225 + SYS_UTIMES = 5226 + SYS_MBIND = 5227 + SYS_GET_MEMPOLICY = 5228 + SYS_SET_MEMPOLICY = 5229 + SYS_MQ_OPEN = 5230 + SYS_MQ_UNLINK = 5231 + SYS_MQ_TIMEDSEND = 5232 + SYS_MQ_TIMEDRECEIVE = 5233 + SYS_MQ_NOTIFY = 5234 + SYS_MQ_GETSETATTR = 5235 
+ SYS_VSERVER = 5236 + SYS_WAITID = 5237 + SYS_ADD_KEY = 5239 + SYS_REQUEST_KEY = 5240 + SYS_KEYCTL = 5241 + SYS_SET_THREAD_AREA = 5242 + SYS_INOTIFY_INIT = 5243 + SYS_INOTIFY_ADD_WATCH = 5244 + SYS_INOTIFY_RM_WATCH = 5245 + SYS_MIGRATE_PAGES = 5246 + SYS_OPENAT = 5247 + SYS_MKDIRAT = 5248 + SYS_MKNODAT = 5249 + SYS_FCHOWNAT = 5250 + SYS_FUTIMESAT = 5251 + SYS_NEWFSTATAT = 5252 + SYS_UNLINKAT = 5253 + SYS_RENAMEAT = 5254 + SYS_LINKAT = 5255 + SYS_SYMLINKAT = 5256 + SYS_READLINKAT = 5257 + SYS_FCHMODAT = 5258 + SYS_FACCESSAT = 5259 + SYS_PSELECT6 = 5260 + SYS_PPOLL = 5261 + SYS_UNSHARE = 5262 + SYS_SPLICE = 5263 + SYS_SYNC_FILE_RANGE = 5264 + SYS_TEE = 5265 + SYS_VMSPLICE = 5266 + SYS_MOVE_PAGES = 5267 + SYS_SET_ROBUST_LIST = 5268 + SYS_GET_ROBUST_LIST = 5269 + SYS_KEXEC_LOAD = 5270 + SYS_GETCPU = 5271 + SYS_EPOLL_PWAIT = 5272 + SYS_IOPRIO_SET = 5273 + SYS_IOPRIO_GET = 5274 + SYS_UTIMENSAT = 5275 + SYS_SIGNALFD = 5276 + SYS_TIMERFD = 5277 + SYS_EVENTFD = 5278 + SYS_FALLOCATE = 5279 + SYS_TIMERFD_CREATE = 5280 + SYS_TIMERFD_GETTIME = 5281 + SYS_TIMERFD_SETTIME = 5282 + SYS_SIGNALFD4 = 5283 + SYS_EVENTFD2 = 5284 + SYS_EPOLL_CREATE1 = 5285 + SYS_DUP3 = 5286 + SYS_PIPE2 = 5287 + SYS_INOTIFY_INIT1 = 5288 + SYS_PREADV = 5289 + SYS_PWRITEV = 5290 + SYS_RT_TGSIGQUEUEINFO = 5291 + SYS_PERF_EVENT_OPEN = 5292 + SYS_ACCEPT4 = 5293 + SYS_RECVMMSG = 5294 + SYS_FANOTIFY_INIT = 5295 + SYS_FANOTIFY_MARK = 5296 + SYS_PRLIMIT64 = 5297 + SYS_NAME_TO_HANDLE_AT = 5298 + SYS_OPEN_BY_HANDLE_AT = 5299 + SYS_CLOCK_ADJTIME = 5300 + SYS_SYNCFS = 5301 + SYS_SENDMMSG = 5302 + SYS_SETNS = 5303 + SYS_PROCESS_VM_READV = 5304 + SYS_PROCESS_VM_WRITEV = 5305 + SYS_KCMP = 5306 + SYS_FINIT_MODULE = 5307 + SYS_GETDENTS64 = 5308 + SYS_SCHED_SETATTR = 5309 + SYS_SCHED_GETATTR = 5310 + SYS_RENAMEAT2 = 5311 + SYS_SECCOMP = 5312 + SYS_GETRANDOM = 5313 + SYS_MEMFD_CREATE = 5314 + SYS_BPF = 5315 + SYS_EXECVEAT = 5316 + SYS_USERFAULTFD = 5317 + SYS_MEMBARRIER = 5318 + SYS_MLOCK2 = 5319 + SYS_COPY_FILE_RANGE = 
5320 + SYS_PREADV2 = 5321 + SYS_PWRITEV2 = 5322 + SYS_PKEY_MPROTECT = 5323 + SYS_PKEY_ALLOC = 5324 + SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 + SYS_RSEQ = 5327 + SYS_IO_PGETEVENTS = 5328 + SYS_PIDFD_SEND_SIGNAL = 5424 + SYS_IO_URING_SETUP = 5425 + SYS_IO_URING_ENTER = 5426 + SYS_IO_URING_REGISTER = 5427 + SYS_OPEN_TREE = 5428 + SYS_MOVE_MOUNT = 5429 + SYS_FSOPEN = 5430 + SYS_FSCONFIG = 5431 + SYS_FSMOUNT = 5432 + SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 + SYS_CLONE3 = 5435 + SYS_CLOSE_RANGE = 5436 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 + SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 + SYS_EPOLL_PWAIT2 = 5441 + SYS_MOUNT_SETATTR = 5442 + SYS_LANDLOCK_CREATE_RULESET = 5444 + SYS_LANDLOCK_ADD_RULE = 5445 + SYS_LANDLOCK_RESTRICT_SELF = 5446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 80e6696..dcd9265 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -424,4 +424,7 @@ const ( SYS_PROCESS_MADVISE = 4440 SYS_EPOLL_PWAIT2 = 4441 SYS_MOUNT_SETATTR = 4442 + SYS_LANDLOCK_CREATE_RULESET = 4444 + SYS_LANDLOCK_ADD_RULE = 4445 + SYS_LANDLOCK_RESTRICT_SELF = 4446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index b9d697f..d5ee2c9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -431,4 +431,7 @@ const ( SYS_PROCESS_MADVISE = 440 SYS_EPOLL_PWAIT2 = 441 SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 08edc54..fec3220 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -7,400 +7,403 @@ package unix 
const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - 
SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86 = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_QUERY_MODULE = 166 - SYS_POLL = 167 - SYS_NFSSERVCTL = 168 - SYS_SETRESGID = 169 - SYS_GETRESGID = 170 - SYS_PRCTL = 171 - SYS_RT_SIGRETURN = 172 - SYS_RT_SIGACTION = 173 - SYS_RT_SIGPROCMASK = 174 - SYS_RT_SIGPENDING = 175 - SYS_RT_SIGTIMEDWAIT = 176 - SYS_RT_SIGQUEUEINFO = 177 - SYS_RT_SIGSUSPEND = 178 - SYS_PREAD64 = 179 - SYS_PWRITE64 = 180 - SYS_CHOWN = 181 - SYS_GETCWD = 182 - SYS_CAPGET = 183 - SYS_CAPSET = 184 - SYS_SIGALTSTACK = 185 - SYS_SENDFILE = 186 - SYS_GETPMSG = 187 - SYS_PUTPMSG = 188 - SYS_VFORK = 189 - SYS_UGETRLIMIT = 190 - SYS_READAHEAD = 191 - SYS_PCICONFIG_READ = 198 - SYS_PCICONFIG_WRITE = 199 - SYS_PCICONFIG_IOBASE = 200 - SYS_MULTIPLEXER = 201 - SYS_GETDENTS64 = 202 - SYS_PIVOT_ROOT = 
203 - SYS_MADVISE = 205 - SYS_MINCORE = 206 - SYS_GETTID = 207 - SYS_TKILL = 208 - SYS_SETXATTR = 209 - SYS_LSETXATTR = 210 - SYS_FSETXATTR = 211 - SYS_GETXATTR = 212 - SYS_LGETXATTR = 213 - SYS_FGETXATTR = 214 - SYS_LISTXATTR = 215 - SYS_LLISTXATTR = 216 - SYS_FLISTXATTR = 217 - SYS_REMOVEXATTR = 218 - SYS_LREMOVEXATTR = 219 - SYS_FREMOVEXATTR = 220 - SYS_FUTEX = 221 - SYS_SCHED_SETAFFINITY = 222 - SYS_SCHED_GETAFFINITY = 223 - SYS_TUXCALL = 225 - SYS_IO_SETUP = 227 - SYS_IO_DESTROY = 228 - SYS_IO_GETEVENTS = 229 - SYS_IO_SUBMIT = 230 - SYS_IO_CANCEL = 231 - SYS_SET_TID_ADDRESS = 232 - SYS_FADVISE64 = 233 - SYS_EXIT_GROUP = 234 - SYS_LOOKUP_DCOOKIE = 235 - SYS_EPOLL_CREATE = 236 - SYS_EPOLL_CTL = 237 - SYS_EPOLL_WAIT = 238 - SYS_REMAP_FILE_PAGES = 239 - SYS_TIMER_CREATE = 240 - SYS_TIMER_SETTIME = 241 - SYS_TIMER_GETTIME = 242 - SYS_TIMER_GETOVERRUN = 243 - SYS_TIMER_DELETE = 244 - SYS_CLOCK_SETTIME = 245 - SYS_CLOCK_GETTIME = 246 - SYS_CLOCK_GETRES = 247 - SYS_CLOCK_NANOSLEEP = 248 - SYS_SWAPCONTEXT = 249 - SYS_TGKILL = 250 - SYS_UTIMES = 251 - SYS_STATFS64 = 252 - SYS_FSTATFS64 = 253 - SYS_RTAS = 255 - SYS_SYS_DEBUG_SETCONTEXT = 256 - SYS_MIGRATE_PAGES = 258 - SYS_MBIND = 259 - SYS_GET_MEMPOLICY = 260 - SYS_SET_MEMPOLICY = 261 - SYS_MQ_OPEN = 262 - SYS_MQ_UNLINK = 263 - SYS_MQ_TIMEDSEND = 264 - SYS_MQ_TIMEDRECEIVE = 265 - SYS_MQ_NOTIFY = 266 - SYS_MQ_GETSETATTR = 267 - SYS_KEXEC_LOAD = 268 - SYS_ADD_KEY = 269 - SYS_REQUEST_KEY = 270 - SYS_KEYCTL = 271 - SYS_WAITID = 272 - SYS_IOPRIO_SET = 273 - SYS_IOPRIO_GET = 274 - SYS_INOTIFY_INIT = 275 - SYS_INOTIFY_ADD_WATCH = 276 - SYS_INOTIFY_RM_WATCH = 277 - SYS_SPU_RUN = 278 - SYS_SPU_CREATE = 279 - SYS_PSELECT6 = 280 - SYS_PPOLL = 281 - SYS_UNSHARE = 282 - SYS_SPLICE = 283 - SYS_TEE = 284 - SYS_VMSPLICE = 285 - SYS_OPENAT = 286 - SYS_MKDIRAT = 287 - SYS_MKNODAT = 288 - SYS_FCHOWNAT = 289 - SYS_FUTIMESAT = 290 - SYS_NEWFSTATAT = 291 - SYS_UNLINKAT = 292 - SYS_RENAMEAT = 293 - SYS_LINKAT = 294 - SYS_SYMLINKAT = 295 - 
SYS_READLINKAT = 296 - SYS_FCHMODAT = 297 - SYS_FACCESSAT = 298 - SYS_GET_ROBUST_LIST = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_MOVE_PAGES = 301 - SYS_GETCPU = 302 - SYS_EPOLL_PWAIT = 303 - SYS_UTIMENSAT = 304 - SYS_SIGNALFD = 305 - SYS_TIMERFD_CREATE = 306 - SYS_EVENTFD = 307 - SYS_SYNC_FILE_RANGE2 = 308 - SYS_FALLOCATE = 309 - SYS_SUBPAGE_PROT = 310 - SYS_TIMERFD_SETTIME = 311 - SYS_TIMERFD_GETTIME = 312 - SYS_SIGNALFD4 = 313 - SYS_EVENTFD2 = 314 - SYS_EPOLL_CREATE1 = 315 - SYS_DUP3 = 316 - SYS_PIPE2 = 317 - SYS_INOTIFY_INIT1 = 318 - SYS_PERF_EVENT_OPEN = 319 - SYS_PREADV = 320 - SYS_PWRITEV = 321 - SYS_RT_TGSIGQUEUEINFO = 322 - SYS_FANOTIFY_INIT = 323 - SYS_FANOTIFY_MARK = 324 - SYS_PRLIMIT64 = 325 - SYS_SOCKET = 326 - SYS_BIND = 327 - SYS_CONNECT = 328 - SYS_LISTEN = 329 - SYS_ACCEPT = 330 - SYS_GETSOCKNAME = 331 - SYS_GETPEERNAME = 332 - SYS_SOCKETPAIR = 333 - SYS_SEND = 334 - SYS_SENDTO = 335 - SYS_RECV = 336 - SYS_RECVFROM = 337 - SYS_SHUTDOWN = 338 - SYS_SETSOCKOPT = 339 - SYS_GETSOCKOPT = 340 - SYS_SENDMSG = 341 - SYS_RECVMSG = 342 - SYS_RECVMMSG = 343 - SYS_ACCEPT4 = 344 - SYS_NAME_TO_HANDLE_AT = 345 - SYS_OPEN_BY_HANDLE_AT = 346 - SYS_CLOCK_ADJTIME = 347 - SYS_SYNCFS = 348 - SYS_SENDMMSG = 349 - SYS_SETNS = 350 - SYS_PROCESS_VM_READV = 351 - SYS_PROCESS_VM_WRITEV = 352 - SYS_FINIT_MODULE = 353 - SYS_KCMP = 354 - SYS_SCHED_SETATTR = 355 - SYS_SCHED_GETATTR = 356 - SYS_RENAMEAT2 = 357 - SYS_SECCOMP = 358 - SYS_GETRANDOM = 359 - SYS_MEMFD_CREATE = 360 - SYS_BPF = 361 - SYS_EXECVEAT = 362 - SYS_SWITCH_ENDIAN = 363 - SYS_USERFAULTFD = 364 - SYS_MEMBARRIER = 365 - SYS_MLOCK2 = 378 - SYS_COPY_FILE_RANGE = 379 - SYS_PREADV2 = 380 - SYS_PWRITEV2 = 381 - SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 - SYS_PKEY_ALLOC = 384 - SYS_PKEY_FREE = 385 - SYS_PKEY_MPROTECT = 386 - SYS_RSEQ = 387 - SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - 
SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 
82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + 
SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + 
SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + 
SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 33b33b0..53a89b2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -7,400 +7,403 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS 
= 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86 = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 
136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_QUERY_MODULE = 166 - SYS_POLL = 167 - SYS_NFSSERVCTL = 168 - SYS_SETRESGID = 169 - SYS_GETRESGID = 170 - SYS_PRCTL = 171 - SYS_RT_SIGRETURN = 172 - SYS_RT_SIGACTION = 173 - SYS_RT_SIGPROCMASK = 174 - SYS_RT_SIGPENDING = 175 - SYS_RT_SIGTIMEDWAIT = 176 - SYS_RT_SIGQUEUEINFO = 177 - SYS_RT_SIGSUSPEND = 178 - SYS_PREAD64 = 179 - SYS_PWRITE64 = 180 - SYS_CHOWN = 181 - SYS_GETCWD = 182 - SYS_CAPGET = 183 - SYS_CAPSET = 184 - SYS_SIGALTSTACK = 185 - SYS_SENDFILE = 186 - SYS_GETPMSG = 187 - SYS_PUTPMSG = 188 - SYS_VFORK = 189 - SYS_UGETRLIMIT = 190 - SYS_READAHEAD = 191 - SYS_PCICONFIG_READ = 198 - SYS_PCICONFIG_WRITE = 199 - SYS_PCICONFIG_IOBASE = 200 - SYS_MULTIPLEXER = 201 - SYS_GETDENTS64 = 202 - SYS_PIVOT_ROOT = 203 - SYS_MADVISE = 205 - SYS_MINCORE = 206 - SYS_GETTID = 207 - SYS_TKILL = 208 - SYS_SETXATTR = 209 - SYS_LSETXATTR = 210 - SYS_FSETXATTR = 211 - SYS_GETXATTR = 212 - SYS_LGETXATTR = 213 - SYS_FGETXATTR = 214 - SYS_LISTXATTR = 215 - SYS_LLISTXATTR = 216 - SYS_FLISTXATTR = 217 - SYS_REMOVEXATTR = 218 - SYS_LREMOVEXATTR = 219 - SYS_FREMOVEXATTR = 220 - SYS_FUTEX = 221 - SYS_SCHED_SETAFFINITY = 222 - SYS_SCHED_GETAFFINITY = 223 - SYS_TUXCALL = 225 - SYS_IO_SETUP = 227 - SYS_IO_DESTROY = 228 - SYS_IO_GETEVENTS = 229 - SYS_IO_SUBMIT = 230 - SYS_IO_CANCEL = 231 - SYS_SET_TID_ADDRESS = 232 - 
SYS_FADVISE64 = 233 - SYS_EXIT_GROUP = 234 - SYS_LOOKUP_DCOOKIE = 235 - SYS_EPOLL_CREATE = 236 - SYS_EPOLL_CTL = 237 - SYS_EPOLL_WAIT = 238 - SYS_REMAP_FILE_PAGES = 239 - SYS_TIMER_CREATE = 240 - SYS_TIMER_SETTIME = 241 - SYS_TIMER_GETTIME = 242 - SYS_TIMER_GETOVERRUN = 243 - SYS_TIMER_DELETE = 244 - SYS_CLOCK_SETTIME = 245 - SYS_CLOCK_GETTIME = 246 - SYS_CLOCK_GETRES = 247 - SYS_CLOCK_NANOSLEEP = 248 - SYS_SWAPCONTEXT = 249 - SYS_TGKILL = 250 - SYS_UTIMES = 251 - SYS_STATFS64 = 252 - SYS_FSTATFS64 = 253 - SYS_RTAS = 255 - SYS_SYS_DEBUG_SETCONTEXT = 256 - SYS_MIGRATE_PAGES = 258 - SYS_MBIND = 259 - SYS_GET_MEMPOLICY = 260 - SYS_SET_MEMPOLICY = 261 - SYS_MQ_OPEN = 262 - SYS_MQ_UNLINK = 263 - SYS_MQ_TIMEDSEND = 264 - SYS_MQ_TIMEDRECEIVE = 265 - SYS_MQ_NOTIFY = 266 - SYS_MQ_GETSETATTR = 267 - SYS_KEXEC_LOAD = 268 - SYS_ADD_KEY = 269 - SYS_REQUEST_KEY = 270 - SYS_KEYCTL = 271 - SYS_WAITID = 272 - SYS_IOPRIO_SET = 273 - SYS_IOPRIO_GET = 274 - SYS_INOTIFY_INIT = 275 - SYS_INOTIFY_ADD_WATCH = 276 - SYS_INOTIFY_RM_WATCH = 277 - SYS_SPU_RUN = 278 - SYS_SPU_CREATE = 279 - SYS_PSELECT6 = 280 - SYS_PPOLL = 281 - SYS_UNSHARE = 282 - SYS_SPLICE = 283 - SYS_TEE = 284 - SYS_VMSPLICE = 285 - SYS_OPENAT = 286 - SYS_MKDIRAT = 287 - SYS_MKNODAT = 288 - SYS_FCHOWNAT = 289 - SYS_FUTIMESAT = 290 - SYS_NEWFSTATAT = 291 - SYS_UNLINKAT = 292 - SYS_RENAMEAT = 293 - SYS_LINKAT = 294 - SYS_SYMLINKAT = 295 - SYS_READLINKAT = 296 - SYS_FCHMODAT = 297 - SYS_FACCESSAT = 298 - SYS_GET_ROBUST_LIST = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_MOVE_PAGES = 301 - SYS_GETCPU = 302 - SYS_EPOLL_PWAIT = 303 - SYS_UTIMENSAT = 304 - SYS_SIGNALFD = 305 - SYS_TIMERFD_CREATE = 306 - SYS_EVENTFD = 307 - SYS_SYNC_FILE_RANGE2 = 308 - SYS_FALLOCATE = 309 - SYS_SUBPAGE_PROT = 310 - SYS_TIMERFD_SETTIME = 311 - SYS_TIMERFD_GETTIME = 312 - SYS_SIGNALFD4 = 313 - SYS_EVENTFD2 = 314 - SYS_EPOLL_CREATE1 = 315 - SYS_DUP3 = 316 - SYS_PIPE2 = 317 - SYS_INOTIFY_INIT1 = 318 - SYS_PERF_EVENT_OPEN = 319 - SYS_PREADV = 320 - 
SYS_PWRITEV = 321 - SYS_RT_TGSIGQUEUEINFO = 322 - SYS_FANOTIFY_INIT = 323 - SYS_FANOTIFY_MARK = 324 - SYS_PRLIMIT64 = 325 - SYS_SOCKET = 326 - SYS_BIND = 327 - SYS_CONNECT = 328 - SYS_LISTEN = 329 - SYS_ACCEPT = 330 - SYS_GETSOCKNAME = 331 - SYS_GETPEERNAME = 332 - SYS_SOCKETPAIR = 333 - SYS_SEND = 334 - SYS_SENDTO = 335 - SYS_RECV = 336 - SYS_RECVFROM = 337 - SYS_SHUTDOWN = 338 - SYS_SETSOCKOPT = 339 - SYS_GETSOCKOPT = 340 - SYS_SENDMSG = 341 - SYS_RECVMSG = 342 - SYS_RECVMMSG = 343 - SYS_ACCEPT4 = 344 - SYS_NAME_TO_HANDLE_AT = 345 - SYS_OPEN_BY_HANDLE_AT = 346 - SYS_CLOCK_ADJTIME = 347 - SYS_SYNCFS = 348 - SYS_SENDMMSG = 349 - SYS_SETNS = 350 - SYS_PROCESS_VM_READV = 351 - SYS_PROCESS_VM_WRITEV = 352 - SYS_FINIT_MODULE = 353 - SYS_KCMP = 354 - SYS_SCHED_SETATTR = 355 - SYS_SCHED_GETATTR = 356 - SYS_RENAMEAT2 = 357 - SYS_SECCOMP = 358 - SYS_GETRANDOM = 359 - SYS_MEMFD_CREATE = 360 - SYS_BPF = 361 - SYS_EXECVEAT = 362 - SYS_SWITCH_ENDIAN = 363 - SYS_USERFAULTFD = 364 - SYS_MEMBARRIER = 365 - SYS_MLOCK2 = 378 - SYS_COPY_FILE_RANGE = 379 - SYS_PREADV2 = 380 - SYS_PWRITEV2 = 381 - SYS_KEXEC_FILE_LOAD = 382 - SYS_STATX = 383 - SYS_PKEY_ALLOC = 384 - SYS_PKEY_FREE = 385 - SYS_PKEY_MPROTECT = 386 - SYS_RSEQ = 387 - SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 
4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + 
SYS_VM86 = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_QUERY_MODULE = 166 + SYS_POLL = 167 + SYS_NFSSERVCTL = 168 + SYS_SETRESGID = 169 + SYS_GETRESGID = 170 + SYS_PRCTL = 171 + SYS_RT_SIGRETURN = 172 + SYS_RT_SIGACTION = 173 + SYS_RT_SIGPROCMASK = 174 + SYS_RT_SIGPENDING = 175 + SYS_RT_SIGTIMEDWAIT = 176 + SYS_RT_SIGQUEUEINFO = 177 + SYS_RT_SIGSUSPEND = 178 + SYS_PREAD64 = 179 + SYS_PWRITE64 = 180 + SYS_CHOWN = 181 + SYS_GETCWD = 182 + SYS_CAPGET = 183 + SYS_CAPSET = 184 + SYS_SIGALTSTACK = 185 + SYS_SENDFILE = 186 + SYS_GETPMSG = 187 + SYS_PUTPMSG = 188 + SYS_VFORK = 189 + SYS_UGETRLIMIT = 190 + SYS_READAHEAD = 191 + SYS_PCICONFIG_READ = 198 + SYS_PCICONFIG_WRITE = 199 + SYS_PCICONFIG_IOBASE = 200 + SYS_MULTIPLEXER = 201 + SYS_GETDENTS64 = 202 + SYS_PIVOT_ROOT = 203 + SYS_MADVISE = 205 + SYS_MINCORE = 206 + SYS_GETTID = 207 + SYS_TKILL = 208 + 
SYS_SETXATTR = 209 + SYS_LSETXATTR = 210 + SYS_FSETXATTR = 211 + SYS_GETXATTR = 212 + SYS_LGETXATTR = 213 + SYS_FGETXATTR = 214 + SYS_LISTXATTR = 215 + SYS_LLISTXATTR = 216 + SYS_FLISTXATTR = 217 + SYS_REMOVEXATTR = 218 + SYS_LREMOVEXATTR = 219 + SYS_FREMOVEXATTR = 220 + SYS_FUTEX = 221 + SYS_SCHED_SETAFFINITY = 222 + SYS_SCHED_GETAFFINITY = 223 + SYS_TUXCALL = 225 + SYS_IO_SETUP = 227 + SYS_IO_DESTROY = 228 + SYS_IO_GETEVENTS = 229 + SYS_IO_SUBMIT = 230 + SYS_IO_CANCEL = 231 + SYS_SET_TID_ADDRESS = 232 + SYS_FADVISE64 = 233 + SYS_EXIT_GROUP = 234 + SYS_LOOKUP_DCOOKIE = 235 + SYS_EPOLL_CREATE = 236 + SYS_EPOLL_CTL = 237 + SYS_EPOLL_WAIT = 238 + SYS_REMAP_FILE_PAGES = 239 + SYS_TIMER_CREATE = 240 + SYS_TIMER_SETTIME = 241 + SYS_TIMER_GETTIME = 242 + SYS_TIMER_GETOVERRUN = 243 + SYS_TIMER_DELETE = 244 + SYS_CLOCK_SETTIME = 245 + SYS_CLOCK_GETTIME = 246 + SYS_CLOCK_GETRES = 247 + SYS_CLOCK_NANOSLEEP = 248 + SYS_SWAPCONTEXT = 249 + SYS_TGKILL = 250 + SYS_UTIMES = 251 + SYS_STATFS64 = 252 + SYS_FSTATFS64 = 253 + SYS_RTAS = 255 + SYS_SYS_DEBUG_SETCONTEXT = 256 + SYS_MIGRATE_PAGES = 258 + SYS_MBIND = 259 + SYS_GET_MEMPOLICY = 260 + SYS_SET_MEMPOLICY = 261 + SYS_MQ_OPEN = 262 + SYS_MQ_UNLINK = 263 + SYS_MQ_TIMEDSEND = 264 + SYS_MQ_TIMEDRECEIVE = 265 + SYS_MQ_NOTIFY = 266 + SYS_MQ_GETSETATTR = 267 + SYS_KEXEC_LOAD = 268 + SYS_ADD_KEY = 269 + SYS_REQUEST_KEY = 270 + SYS_KEYCTL = 271 + SYS_WAITID = 272 + SYS_IOPRIO_SET = 273 + SYS_IOPRIO_GET = 274 + SYS_INOTIFY_INIT = 275 + SYS_INOTIFY_ADD_WATCH = 276 + SYS_INOTIFY_RM_WATCH = 277 + SYS_SPU_RUN = 278 + SYS_SPU_CREATE = 279 + SYS_PSELECT6 = 280 + SYS_PPOLL = 281 + SYS_UNSHARE = 282 + SYS_SPLICE = 283 + SYS_TEE = 284 + SYS_VMSPLICE = 285 + SYS_OPENAT = 286 + SYS_MKDIRAT = 287 + SYS_MKNODAT = 288 + SYS_FCHOWNAT = 289 + SYS_FUTIMESAT = 290 + SYS_NEWFSTATAT = 291 + SYS_UNLINKAT = 292 + SYS_RENAMEAT = 293 + SYS_LINKAT = 294 + SYS_SYMLINKAT = 295 + SYS_READLINKAT = 296 + SYS_FCHMODAT = 297 + SYS_FACCESSAT = 298 + SYS_GET_ROBUST_LIST 
= 299 + SYS_SET_ROBUST_LIST = 300 + SYS_MOVE_PAGES = 301 + SYS_GETCPU = 302 + SYS_EPOLL_PWAIT = 303 + SYS_UTIMENSAT = 304 + SYS_SIGNALFD = 305 + SYS_TIMERFD_CREATE = 306 + SYS_EVENTFD = 307 + SYS_SYNC_FILE_RANGE2 = 308 + SYS_FALLOCATE = 309 + SYS_SUBPAGE_PROT = 310 + SYS_TIMERFD_SETTIME = 311 + SYS_TIMERFD_GETTIME = 312 + SYS_SIGNALFD4 = 313 + SYS_EVENTFD2 = 314 + SYS_EPOLL_CREATE1 = 315 + SYS_DUP3 = 316 + SYS_PIPE2 = 317 + SYS_INOTIFY_INIT1 = 318 + SYS_PERF_EVENT_OPEN = 319 + SYS_PREADV = 320 + SYS_PWRITEV = 321 + SYS_RT_TGSIGQUEUEINFO = 322 + SYS_FANOTIFY_INIT = 323 + SYS_FANOTIFY_MARK = 324 + SYS_PRLIMIT64 = 325 + SYS_SOCKET = 326 + SYS_BIND = 327 + SYS_CONNECT = 328 + SYS_LISTEN = 329 + SYS_ACCEPT = 330 + SYS_GETSOCKNAME = 331 + SYS_GETPEERNAME = 332 + SYS_SOCKETPAIR = 333 + SYS_SEND = 334 + SYS_SENDTO = 335 + SYS_RECV = 336 + SYS_RECVFROM = 337 + SYS_SHUTDOWN = 338 + SYS_SETSOCKOPT = 339 + SYS_GETSOCKOPT = 340 + SYS_SENDMSG = 341 + SYS_RECVMSG = 342 + SYS_RECVMMSG = 343 + SYS_ACCEPT4 = 344 + SYS_NAME_TO_HANDLE_AT = 345 + SYS_OPEN_BY_HANDLE_AT = 346 + SYS_CLOCK_ADJTIME = 347 + SYS_SYNCFS = 348 + SYS_SENDMMSG = 349 + SYS_SETNS = 350 + SYS_PROCESS_VM_READV = 351 + SYS_PROCESS_VM_WRITEV = 352 + SYS_FINIT_MODULE = 353 + SYS_KCMP = 354 + SYS_SCHED_SETATTR = 355 + SYS_SCHED_GETATTR = 356 + SYS_RENAMEAT2 = 357 + SYS_SECCOMP = 358 + SYS_GETRANDOM = 359 + SYS_MEMFD_CREATE = 360 + SYS_BPF = 361 + SYS_EXECVEAT = 362 + SYS_SWITCH_ENDIAN = 363 + SYS_USERFAULTFD = 364 + SYS_MEMBARRIER = 365 + SYS_MLOCK2 = 378 + SYS_COPY_FILE_RANGE = 379 + SYS_PREADV2 = 380 + SYS_PWRITEV2 = 381 + SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 + SYS_RSEQ = 387 + SYS_IO_PGETEVENTS = 388 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + 
SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 66c8a8e..0db9fbb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -7,302 +7,305 @@ package unix const ( - SYS_IO_SETUP = 0 - SYS_IO_DESTROY = 1 - SYS_IO_SUBMIT = 2 - SYS_IO_CANCEL = 3 - SYS_IO_GETEVENTS = 4 - SYS_SETXATTR = 5 - SYS_LSETXATTR = 6 - SYS_FSETXATTR = 7 - SYS_GETXATTR = 8 - SYS_LGETXATTR = 9 - SYS_FGETXATTR = 10 - SYS_LISTXATTR = 11 - SYS_LLISTXATTR = 12 - SYS_FLISTXATTR = 13 - SYS_REMOVEXATTR = 14 - SYS_LREMOVEXATTR = 15 - SYS_FREMOVEXATTR = 16 - SYS_GETCWD = 17 - SYS_LOOKUP_DCOOKIE = 18 - SYS_EVENTFD2 = 19 - SYS_EPOLL_CREATE1 = 20 - SYS_EPOLL_CTL = 21 - SYS_EPOLL_PWAIT = 22 - SYS_DUP = 23 - SYS_DUP3 = 24 - SYS_FCNTL = 25 - SYS_INOTIFY_INIT1 = 26 - SYS_INOTIFY_ADD_WATCH = 27 - SYS_INOTIFY_RM_WATCH = 28 - SYS_IOCTL = 29 - SYS_IOPRIO_SET = 30 - SYS_IOPRIO_GET = 31 - SYS_FLOCK = 32 - SYS_MKNODAT = 33 - SYS_MKDIRAT = 34 - SYS_UNLINKAT = 35 - SYS_SYMLINKAT = 36 - SYS_LINKAT = 37 - SYS_UMOUNT2 = 39 - SYS_MOUNT = 40 - SYS_PIVOT_ROOT = 41 - SYS_NFSSERVCTL = 42 - SYS_STATFS = 43 - SYS_FSTATFS = 44 - SYS_TRUNCATE = 45 - SYS_FTRUNCATE = 46 - SYS_FALLOCATE = 47 - SYS_FACCESSAT = 48 - SYS_CHDIR = 49 - SYS_FCHDIR = 50 - SYS_CHROOT = 51 - SYS_FCHMOD = 52 - SYS_FCHMODAT = 53 - SYS_FCHOWNAT = 54 - SYS_FCHOWN = 55 - SYS_OPENAT = 56 - SYS_CLOSE = 57 - 
SYS_VHANGUP = 58 - SYS_PIPE2 = 59 - SYS_QUOTACTL = 60 - SYS_GETDENTS64 = 61 - SYS_LSEEK = 62 - SYS_READ = 63 - SYS_WRITE = 64 - SYS_READV = 65 - SYS_WRITEV = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_PREADV = 69 - SYS_PWRITEV = 70 - SYS_SENDFILE = 71 - SYS_PSELECT6 = 72 - SYS_PPOLL = 73 - SYS_SIGNALFD4 = 74 - SYS_VMSPLICE = 75 - SYS_SPLICE = 76 - SYS_TEE = 77 - SYS_READLINKAT = 78 - SYS_FSTATAT = 79 - SYS_FSTAT = 80 - SYS_SYNC = 81 - SYS_FSYNC = 82 - SYS_FDATASYNC = 83 - SYS_SYNC_FILE_RANGE = 84 - SYS_TIMERFD_CREATE = 85 - SYS_TIMERFD_SETTIME = 86 - SYS_TIMERFD_GETTIME = 87 - SYS_UTIMENSAT = 88 - SYS_ACCT = 89 - SYS_CAPGET = 90 - SYS_CAPSET = 91 - SYS_PERSONALITY = 92 - SYS_EXIT = 93 - SYS_EXIT_GROUP = 94 - SYS_WAITID = 95 - SYS_SET_TID_ADDRESS = 96 - SYS_UNSHARE = 97 - SYS_FUTEX = 98 - SYS_SET_ROBUST_LIST = 99 - SYS_GET_ROBUST_LIST = 100 - SYS_NANOSLEEP = 101 - SYS_GETITIMER = 102 - SYS_SETITIMER = 103 - SYS_KEXEC_LOAD = 104 - SYS_INIT_MODULE = 105 - SYS_DELETE_MODULE = 106 - SYS_TIMER_CREATE = 107 - SYS_TIMER_GETTIME = 108 - SYS_TIMER_GETOVERRUN = 109 - SYS_TIMER_SETTIME = 110 - SYS_TIMER_DELETE = 111 - SYS_CLOCK_SETTIME = 112 - SYS_CLOCK_GETTIME = 113 - SYS_CLOCK_GETRES = 114 - SYS_CLOCK_NANOSLEEP = 115 - SYS_SYSLOG = 116 - SYS_PTRACE = 117 - SYS_SCHED_SETPARAM = 118 - SYS_SCHED_SETSCHEDULER = 119 - SYS_SCHED_GETSCHEDULER = 120 - SYS_SCHED_GETPARAM = 121 - SYS_SCHED_SETAFFINITY = 122 - SYS_SCHED_GETAFFINITY = 123 - SYS_SCHED_YIELD = 124 - SYS_SCHED_GET_PRIORITY_MAX = 125 - SYS_SCHED_GET_PRIORITY_MIN = 126 - SYS_SCHED_RR_GET_INTERVAL = 127 - SYS_RESTART_SYSCALL = 128 - SYS_KILL = 129 - SYS_TKILL = 130 - SYS_TGKILL = 131 - SYS_SIGALTSTACK = 132 - SYS_RT_SIGSUSPEND = 133 - SYS_RT_SIGACTION = 134 - SYS_RT_SIGPROCMASK = 135 - SYS_RT_SIGPENDING = 136 - SYS_RT_SIGTIMEDWAIT = 137 - SYS_RT_SIGQUEUEINFO = 138 - SYS_RT_SIGRETURN = 139 - SYS_SETPRIORITY = 140 - SYS_GETPRIORITY = 141 - SYS_REBOOT = 142 - SYS_SETREGID = 143 - SYS_SETGID = 144 - SYS_SETREUID = 145 - 
SYS_SETUID = 146 - SYS_SETRESUID = 147 - SYS_GETRESUID = 148 - SYS_SETRESGID = 149 - SYS_GETRESGID = 150 - SYS_SETFSUID = 151 - SYS_SETFSGID = 152 - SYS_TIMES = 153 - SYS_SETPGID = 154 - SYS_GETPGID = 155 - SYS_GETSID = 156 - SYS_SETSID = 157 - SYS_GETGROUPS = 158 - SYS_SETGROUPS = 159 - SYS_UNAME = 160 - SYS_SETHOSTNAME = 161 - SYS_SETDOMAINNAME = 162 - SYS_GETRLIMIT = 163 - SYS_SETRLIMIT = 164 - SYS_GETRUSAGE = 165 - SYS_UMASK = 166 - SYS_PRCTL = 167 - SYS_GETCPU = 168 - SYS_GETTIMEOFDAY = 169 - SYS_SETTIMEOFDAY = 170 - SYS_ADJTIMEX = 171 - SYS_GETPID = 172 - SYS_GETPPID = 173 - SYS_GETUID = 174 - SYS_GETEUID = 175 - SYS_GETGID = 176 - SYS_GETEGID = 177 - SYS_GETTID = 178 - SYS_SYSINFO = 179 - SYS_MQ_OPEN = 180 - SYS_MQ_UNLINK = 181 - SYS_MQ_TIMEDSEND = 182 - SYS_MQ_TIMEDRECEIVE = 183 - SYS_MQ_NOTIFY = 184 - SYS_MQ_GETSETATTR = 185 - SYS_MSGGET = 186 - SYS_MSGCTL = 187 - SYS_MSGRCV = 188 - SYS_MSGSND = 189 - SYS_SEMGET = 190 - SYS_SEMCTL = 191 - SYS_SEMTIMEDOP = 192 - SYS_SEMOP = 193 - SYS_SHMGET = 194 - SYS_SHMCTL = 195 - SYS_SHMAT = 196 - SYS_SHMDT = 197 - SYS_SOCKET = 198 - SYS_SOCKETPAIR = 199 - SYS_BIND = 200 - SYS_LISTEN = 201 - SYS_ACCEPT = 202 - SYS_CONNECT = 203 - SYS_GETSOCKNAME = 204 - SYS_GETPEERNAME = 205 - SYS_SENDTO = 206 - SYS_RECVFROM = 207 - SYS_SETSOCKOPT = 208 - SYS_GETSOCKOPT = 209 - SYS_SHUTDOWN = 210 - SYS_SENDMSG = 211 - SYS_RECVMSG = 212 - SYS_READAHEAD = 213 - SYS_BRK = 214 - SYS_MUNMAP = 215 - SYS_MREMAP = 216 - SYS_ADD_KEY = 217 - SYS_REQUEST_KEY = 218 - SYS_KEYCTL = 219 - SYS_CLONE = 220 - SYS_EXECVE = 221 - SYS_MMAP = 222 - SYS_FADVISE64 = 223 - SYS_SWAPON = 224 - SYS_SWAPOFF = 225 - SYS_MPROTECT = 226 - SYS_MSYNC = 227 - SYS_MLOCK = 228 - SYS_MUNLOCK = 229 - SYS_MLOCKALL = 230 - SYS_MUNLOCKALL = 231 - SYS_MINCORE = 232 - SYS_MADVISE = 233 - SYS_REMAP_FILE_PAGES = 234 - SYS_MBIND = 235 - SYS_GET_MEMPOLICY = 236 - SYS_SET_MEMPOLICY = 237 - SYS_MIGRATE_PAGES = 238 - SYS_MOVE_PAGES = 239 - SYS_RT_TGSIGQUEUEINFO = 240 - 
SYS_PERF_EVENT_OPEN = 241 - SYS_ACCEPT4 = 242 - SYS_RECVMMSG = 243 - SYS_ARCH_SPECIFIC_SYSCALL = 244 - SYS_WAIT4 = 260 - SYS_PRLIMIT64 = 261 - SYS_FANOTIFY_INIT = 262 - SYS_FANOTIFY_MARK = 263 - SYS_NAME_TO_HANDLE_AT = 264 - SYS_OPEN_BY_HANDLE_AT = 265 - SYS_CLOCK_ADJTIME = 266 - SYS_SYNCFS = 267 - SYS_SETNS = 268 - SYS_SENDMMSG = 269 - SYS_PROCESS_VM_READV = 270 - SYS_PROCESS_VM_WRITEV = 271 - SYS_KCMP = 272 - SYS_FINIT_MODULE = 273 - SYS_SCHED_SETATTR = 274 - SYS_SCHED_GETATTR = 275 - SYS_RENAMEAT2 = 276 - SYS_SECCOMP = 277 - SYS_GETRANDOM = 278 - SYS_MEMFD_CREATE = 279 - SYS_BPF = 280 - SYS_EXECVEAT = 281 - SYS_USERFAULTFD = 282 - SYS_MEMBARRIER = 283 - SYS_MLOCK2 = 284 - SYS_COPY_FILE_RANGE = 285 - SYS_PREADV2 = 286 - SYS_PWRITEV2 = 287 - SYS_PKEY_MPROTECT = 288 - SYS_PKEY_ALLOC = 289 - SYS_PKEY_FREE = 290 - SYS_STATX = 291 - SYS_IO_PGETEVENTS = 292 - SYS_RSEQ = 293 - SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 
+ SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_FSTATAT = 79 + SYS_FSTAT = 80 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + 
SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 162 + SYS_GETRLIMIT = 163 + SYS_SETRLIMIT = 164 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY 
= 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index aea5760..378e6ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -7,365 +7,368 @@ package unix const ( - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_RESTART_SYSCALL = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_BRK = 45 - SYS_SIGNAL = 48 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_SETPGID = 57 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_SYMLINK = 83 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_LOOKUP_DCOOKIE = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - 
SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_GETDENTS = 141 - SYS_SELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_QUERY_MODULE = 167 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_GETPMSG = 188 - SYS_PUTPMSG = 189 - SYS_VFORK = 190 - SYS_GETRLIMIT = 191 - SYS_LCHOWN = 198 - SYS_GETUID = 199 - SYS_GETGID = 200 - SYS_GETEUID = 201 - SYS_GETEGID = 202 - SYS_SETREUID = 203 - SYS_SETREGID = 204 - SYS_GETGROUPS = 205 - SYS_SETGROUPS = 206 - SYS_FCHOWN = 207 - SYS_SETRESUID = 208 - SYS_GETRESUID = 209 - SYS_SETRESGID = 210 - SYS_GETRESGID = 211 - SYS_CHOWN = 212 - SYS_SETUID = 213 - SYS_SETGID = 214 - SYS_SETFSUID = 215 - SYS_SETFSGID = 216 - SYS_PIVOT_ROOT = 217 - SYS_MINCORE = 218 - SYS_MADVISE = 219 - SYS_GETDENTS64 = 220 - SYS_READAHEAD = 222 - SYS_SETXATTR = 224 - SYS_LSETXATTR = 225 - SYS_FSETXATTR = 226 - SYS_GETXATTR = 227 - SYS_LGETXATTR = 228 - SYS_FGETXATTR = 229 - SYS_LISTXATTR = 230 - SYS_LLISTXATTR = 231 - SYS_FLISTXATTR = 232 - SYS_REMOVEXATTR = 233 - 
SYS_LREMOVEXATTR = 234 - SYS_FREMOVEXATTR = 235 - SYS_GETTID = 236 - SYS_TKILL = 237 - SYS_FUTEX = 238 - SYS_SCHED_SETAFFINITY = 239 - SYS_SCHED_GETAFFINITY = 240 - SYS_TGKILL = 241 - SYS_IO_SETUP = 243 - SYS_IO_DESTROY = 244 - SYS_IO_GETEVENTS = 245 - SYS_IO_SUBMIT = 246 - SYS_IO_CANCEL = 247 - SYS_EXIT_GROUP = 248 - SYS_EPOLL_CREATE = 249 - SYS_EPOLL_CTL = 250 - SYS_EPOLL_WAIT = 251 - SYS_SET_TID_ADDRESS = 252 - SYS_FADVISE64 = 253 - SYS_TIMER_CREATE = 254 - SYS_TIMER_SETTIME = 255 - SYS_TIMER_GETTIME = 256 - SYS_TIMER_GETOVERRUN = 257 - SYS_TIMER_DELETE = 258 - SYS_CLOCK_SETTIME = 259 - SYS_CLOCK_GETTIME = 260 - SYS_CLOCK_GETRES = 261 - SYS_CLOCK_NANOSLEEP = 262 - SYS_STATFS64 = 265 - SYS_FSTATFS64 = 266 - SYS_REMAP_FILE_PAGES = 267 - SYS_MBIND = 268 - SYS_GET_MEMPOLICY = 269 - SYS_SET_MEMPOLICY = 270 - SYS_MQ_OPEN = 271 - SYS_MQ_UNLINK = 272 - SYS_MQ_TIMEDSEND = 273 - SYS_MQ_TIMEDRECEIVE = 274 - SYS_MQ_NOTIFY = 275 - SYS_MQ_GETSETATTR = 276 - SYS_KEXEC_LOAD = 277 - SYS_ADD_KEY = 278 - SYS_REQUEST_KEY = 279 - SYS_KEYCTL = 280 - SYS_WAITID = 281 - SYS_IOPRIO_SET = 282 - SYS_IOPRIO_GET = 283 - SYS_INOTIFY_INIT = 284 - SYS_INOTIFY_ADD_WATCH = 285 - SYS_INOTIFY_RM_WATCH = 286 - SYS_MIGRATE_PAGES = 287 - SYS_OPENAT = 288 - SYS_MKDIRAT = 289 - SYS_MKNODAT = 290 - SYS_FCHOWNAT = 291 - SYS_FUTIMESAT = 292 - SYS_NEWFSTATAT = 293 - SYS_UNLINKAT = 294 - SYS_RENAMEAT = 295 - SYS_LINKAT = 296 - SYS_SYMLINKAT = 297 - SYS_READLINKAT = 298 - SYS_FCHMODAT = 299 - SYS_FACCESSAT = 300 - SYS_PSELECT6 = 301 - SYS_PPOLL = 302 - SYS_UNSHARE = 303 - SYS_SET_ROBUST_LIST = 304 - SYS_GET_ROBUST_LIST = 305 - SYS_SPLICE = 306 - SYS_SYNC_FILE_RANGE = 307 - SYS_TEE = 308 - SYS_VMSPLICE = 309 - SYS_MOVE_PAGES = 310 - SYS_GETCPU = 311 - SYS_EPOLL_PWAIT = 312 - SYS_UTIMES = 313 - SYS_FALLOCATE = 314 - SYS_UTIMENSAT = 315 - SYS_SIGNALFD = 316 - SYS_TIMERFD = 317 - SYS_EVENTFD = 318 - SYS_TIMERFD_CREATE = 319 - SYS_TIMERFD_SETTIME = 320 - SYS_TIMERFD_GETTIME = 321 - SYS_SIGNALFD4 = 322 - 
SYS_EVENTFD2 = 323 - SYS_INOTIFY_INIT1 = 324 - SYS_PIPE2 = 325 - SYS_DUP3 = 326 - SYS_EPOLL_CREATE1 = 327 - SYS_PREADV = 328 - SYS_PWRITEV = 329 - SYS_RT_TGSIGQUEUEINFO = 330 - SYS_PERF_EVENT_OPEN = 331 - SYS_FANOTIFY_INIT = 332 - SYS_FANOTIFY_MARK = 333 - SYS_PRLIMIT64 = 334 - SYS_NAME_TO_HANDLE_AT = 335 - SYS_OPEN_BY_HANDLE_AT = 336 - SYS_CLOCK_ADJTIME = 337 - SYS_SYNCFS = 338 - SYS_SETNS = 339 - SYS_PROCESS_VM_READV = 340 - SYS_PROCESS_VM_WRITEV = 341 - SYS_S390_RUNTIME_INSTR = 342 - SYS_KCMP = 343 - SYS_FINIT_MODULE = 344 - SYS_SCHED_SETATTR = 345 - SYS_SCHED_GETATTR = 346 - SYS_RENAMEAT2 = 347 - SYS_SECCOMP = 348 - SYS_GETRANDOM = 349 - SYS_MEMFD_CREATE = 350 - SYS_BPF = 351 - SYS_S390_PCI_MMIO_WRITE = 352 - SYS_S390_PCI_MMIO_READ = 353 - SYS_EXECVEAT = 354 - SYS_USERFAULTFD = 355 - SYS_MEMBARRIER = 356 - SYS_RECVMMSG = 357 - SYS_SENDMMSG = 358 - SYS_SOCKET = 359 - SYS_SOCKETPAIR = 360 - SYS_BIND = 361 - SYS_CONNECT = 362 - SYS_LISTEN = 363 - SYS_ACCEPT4 = 364 - SYS_GETSOCKOPT = 365 - SYS_SETSOCKOPT = 366 - SYS_GETSOCKNAME = 367 - SYS_GETPEERNAME = 368 - SYS_SENDTO = 369 - SYS_SENDMSG = 370 - SYS_RECVFROM = 371 - SYS_RECVMSG = 372 - SYS_SHUTDOWN = 373 - SYS_MLOCK2 = 374 - SYS_COPY_FILE_RANGE = 375 - SYS_PREADV2 = 376 - SYS_PWRITEV2 = 377 - SYS_S390_GUARDED_STORAGE = 378 - SYS_STATX = 379 - SYS_S390_STHYI = 380 - SYS_KEXEC_FILE_LOAD = 381 - SYS_IO_PGETEVENTS = 382 - SYS_RSEQ = 383 - SYS_PKEY_MPROTECT = 384 - SYS_PKEY_ALLOC = 385 - SYS_PKEY_FREE = 386 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLONE3 = 435 - SYS_CLOSE_RANGE = 436 - 
SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_RESTART_SYSCALL = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SIGNAL = 48 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_LOOKUP_DCOOKIE = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR 
= 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_GETDENTS = 141 + SYS_SELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_GETRLIMIT = 191 + SYS_LCHOWN = 198 + SYS_GETUID = 199 + SYS_GETGID = 200 + SYS_GETEUID = 201 + SYS_GETEGID = 202 + SYS_SETREUID = 203 + SYS_SETREGID = 204 + SYS_GETGROUPS = 205 + SYS_SETGROUPS = 206 + SYS_FCHOWN = 207 + SYS_SETRESUID = 208 + SYS_GETRESUID = 209 + SYS_SETRESGID = 210 + SYS_GETRESGID = 211 + SYS_CHOWN = 212 + SYS_SETUID = 213 + SYS_SETGID = 214 + SYS_SETFSUID = 215 + SYS_SETFSGID = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_READAHEAD = 222 + SYS_SETXATTR = 224 + SYS_LSETXATTR = 225 + SYS_FSETXATTR = 226 + SYS_GETXATTR = 227 + SYS_LGETXATTR = 228 + SYS_FGETXATTR = 229 + SYS_LISTXATTR = 230 + SYS_LLISTXATTR = 231 + SYS_FLISTXATTR = 232 + SYS_REMOVEXATTR = 233 + SYS_LREMOVEXATTR = 234 + SYS_FREMOVEXATTR = 235 + SYS_GETTID = 236 + SYS_TKILL = 237 + SYS_FUTEX = 238 + SYS_SCHED_SETAFFINITY = 239 + 
SYS_SCHED_GETAFFINITY = 240 + SYS_TGKILL = 241 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_EPOLL_CREATE = 249 + SYS_EPOLL_CTL = 250 + SYS_EPOLL_WAIT = 251 + SYS_SET_TID_ADDRESS = 252 + SYS_FADVISE64 = 253 + SYS_TIMER_CREATE = 254 + SYS_TIMER_SETTIME = 255 + SYS_TIMER_GETTIME = 256 + SYS_TIMER_GETOVERRUN = 257 + SYS_TIMER_DELETE = 258 + SYS_CLOCK_SETTIME = 259 + SYS_CLOCK_GETTIME = 260 + SYS_CLOCK_GETRES = 261 + SYS_CLOCK_NANOSLEEP = 262 + SYS_STATFS64 = 265 + SYS_FSTATFS64 = 266 + SYS_REMAP_FILE_PAGES = 267 + SYS_MBIND = 268 + SYS_GET_MEMPOLICY = 269 + SYS_SET_MEMPOLICY = 270 + SYS_MQ_OPEN = 271 + SYS_MQ_UNLINK = 272 + SYS_MQ_TIMEDSEND = 273 + SYS_MQ_TIMEDRECEIVE = 274 + SYS_MQ_NOTIFY = 275 + SYS_MQ_GETSETATTR = 276 + SYS_KEXEC_LOAD = 277 + SYS_ADD_KEY = 278 + SYS_REQUEST_KEY = 279 + SYS_KEYCTL = 280 + SYS_WAITID = 281 + SYS_IOPRIO_SET = 282 + SYS_IOPRIO_GET = 283 + SYS_INOTIFY_INIT = 284 + SYS_INOTIFY_ADD_WATCH = 285 + SYS_INOTIFY_RM_WATCH = 286 + SYS_MIGRATE_PAGES = 287 + SYS_OPENAT = 288 + SYS_MKDIRAT = 289 + SYS_MKNODAT = 290 + SYS_FCHOWNAT = 291 + SYS_FUTIMESAT = 292 + SYS_NEWFSTATAT = 293 + SYS_UNLINKAT = 294 + SYS_RENAMEAT = 295 + SYS_LINKAT = 296 + SYS_SYMLINKAT = 297 + SYS_READLINKAT = 298 + SYS_FCHMODAT = 299 + SYS_FACCESSAT = 300 + SYS_PSELECT6 = 301 + SYS_PPOLL = 302 + SYS_UNSHARE = 303 + SYS_SET_ROBUST_LIST = 304 + SYS_GET_ROBUST_LIST = 305 + SYS_SPLICE = 306 + SYS_SYNC_FILE_RANGE = 307 + SYS_TEE = 308 + SYS_VMSPLICE = 309 + SYS_MOVE_PAGES = 310 + SYS_GETCPU = 311 + SYS_EPOLL_PWAIT = 312 + SYS_UTIMES = 313 + SYS_FALLOCATE = 314 + SYS_UTIMENSAT = 315 + SYS_SIGNALFD = 316 + SYS_TIMERFD = 317 + SYS_EVENTFD = 318 + SYS_TIMERFD_CREATE = 319 + SYS_TIMERFD_SETTIME = 320 + SYS_TIMERFD_GETTIME = 321 + SYS_SIGNALFD4 = 322 + SYS_EVENTFD2 = 323 + SYS_INOTIFY_INIT1 = 324 + SYS_PIPE2 = 325 + SYS_DUP3 = 326 + SYS_EPOLL_CREATE1 = 327 + SYS_PREADV = 328 + SYS_PWRITEV = 329 
+ SYS_RT_TGSIGQUEUEINFO = 330 + SYS_PERF_EVENT_OPEN = 331 + SYS_FANOTIFY_INIT = 332 + SYS_FANOTIFY_MARK = 333 + SYS_PRLIMIT64 = 334 + SYS_NAME_TO_HANDLE_AT = 335 + SYS_OPEN_BY_HANDLE_AT = 336 + SYS_CLOCK_ADJTIME = 337 + SYS_SYNCFS = 338 + SYS_SETNS = 339 + SYS_PROCESS_VM_READV = 340 + SYS_PROCESS_VM_WRITEV = 341 + SYS_S390_RUNTIME_INSTR = 342 + SYS_KCMP = 343 + SYS_FINIT_MODULE = 344 + SYS_SCHED_SETATTR = 345 + SYS_SCHED_GETATTR = 346 + SYS_RENAMEAT2 = 347 + SYS_SECCOMP = 348 + SYS_GETRANDOM = 349 + SYS_MEMFD_CREATE = 350 + SYS_BPF = 351 + SYS_S390_PCI_MMIO_WRITE = 352 + SYS_S390_PCI_MMIO_READ = 353 + SYS_EXECVEAT = 354 + SYS_USERFAULTFD = 355 + SYS_MEMBARRIER = 356 + SYS_RECVMMSG = 357 + SYS_SENDMMSG = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_MLOCK2 = 374 + SYS_COPY_FILE_RANGE = 375 + SYS_PREADV2 = 376 + SYS_PWRITEV2 = 377 + SYS_S390_GUARDED_STORAGE = 378 + SYS_STATX = 379 + SYS_S390_STHYI = 380 + SYS_KEXEC_FILE_LOAD = 381 + SYS_IO_PGETEVENTS = 382 + SYS_RSEQ = 383 + SYS_PKEY_MPROTECT = 384 + SYS_PKEY_ALLOC = 385 + SYS_PKEY_FREE = 386 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + 
SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 488ca84..58e72b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -7,379 +7,382 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAIT4 = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECV = 11 - SYS_CHDIR = 12 - SYS_CHOWN = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BRK = 17 - SYS_PERFCTR = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_CAPGET = 21 - SYS_CAPSET = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_VMSPLICE = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_SIGALTSTACK = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_STAT = 38 - SYS_SENDFILE = 39 - SYS_LSTAT = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_UMOUNT2 = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_MEMORY_ORDERING = 52 - SYS_IOCTL = 54 - SYS_REBOOT = 55 - SYS_SYMLINK = 57 - SYS_READLINK = 58 - SYS_EXECVE = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_FSTAT = 62 - SYS_FSTAT64 = 63 - SYS_GETPAGESIZE = 64 - SYS_MSYNC = 65 - SYS_VFORK = 66 - SYS_PREAD64 = 67 - SYS_PWRITE64 = 68 - SYS_MMAP = 71 - SYS_MUNMAP = 73 - SYS_MPROTECT = 74 - SYS_MADVISE = 75 - SYS_VHANGUP = 76 - SYS_MINCORE = 78 - SYS_GETGROUPS = 79 - SYS_SETGROUPS = 80 - SYS_GETPGRP = 81 - SYS_SETITIMER = 83 - SYS_SWAPON = 85 - SYS_GETITIMER = 86 - SYS_SETHOSTNAME = 88 - SYS_DUP2 = 90 - SYS_FCNTL = 92 - SYS_SELECT = 93 - SYS_FSYNC = 95 - SYS_SETPRIORITY = 96 - SYS_SOCKET = 97 - SYS_CONNECT = 98 - SYS_ACCEPT = 99 - SYS_GETPRIORITY = 100 - SYS_RT_SIGRETURN = 101 - 
SYS_RT_SIGACTION = 102 - SYS_RT_SIGPROCMASK = 103 - SYS_RT_SIGPENDING = 104 - SYS_RT_SIGTIMEDWAIT = 105 - SYS_RT_SIGQUEUEINFO = 106 - SYS_RT_SIGSUSPEND = 107 - SYS_SETRESUID = 108 - SYS_GETRESUID = 109 - SYS_SETRESGID = 110 - SYS_GETRESGID = 111 - SYS_RECVMSG = 113 - SYS_SENDMSG = 114 - SYS_GETTIMEOFDAY = 116 - SYS_GETRUSAGE = 117 - SYS_GETSOCKOPT = 118 - SYS_GETCWD = 119 - SYS_READV = 120 - SYS_WRITEV = 121 - SYS_SETTIMEOFDAY = 122 - SYS_FCHOWN = 123 - SYS_FCHMOD = 124 - SYS_RECVFROM = 125 - SYS_SETREUID = 126 - SYS_SETREGID = 127 - SYS_RENAME = 128 - SYS_TRUNCATE = 129 - SYS_FTRUNCATE = 130 - SYS_FLOCK = 131 - SYS_LSTAT64 = 132 - SYS_SENDTO = 133 - SYS_SHUTDOWN = 134 - SYS_SOCKETPAIR = 135 - SYS_MKDIR = 136 - SYS_RMDIR = 137 - SYS_UTIMES = 138 - SYS_STAT64 = 139 - SYS_SENDFILE64 = 140 - SYS_GETPEERNAME = 141 - SYS_FUTEX = 142 - SYS_GETTID = 143 - SYS_GETRLIMIT = 144 - SYS_SETRLIMIT = 145 - SYS_PIVOT_ROOT = 146 - SYS_PRCTL = 147 - SYS_PCICONFIG_READ = 148 - SYS_PCICONFIG_WRITE = 149 - SYS_GETSOCKNAME = 150 - SYS_INOTIFY_INIT = 151 - SYS_INOTIFY_ADD_WATCH = 152 - SYS_POLL = 153 - SYS_GETDENTS64 = 154 - SYS_INOTIFY_RM_WATCH = 156 - SYS_STATFS = 157 - SYS_FSTATFS = 158 - SYS_UMOUNT = 159 - SYS_SCHED_SET_AFFINITY = 160 - SYS_SCHED_GET_AFFINITY = 161 - SYS_GETDOMAINNAME = 162 - SYS_SETDOMAINNAME = 163 - SYS_UTRAP_INSTALL = 164 - SYS_QUOTACTL = 165 - SYS_SET_TID_ADDRESS = 166 - SYS_MOUNT = 167 - SYS_USTAT = 168 - SYS_SETXATTR = 169 - SYS_LSETXATTR = 170 - SYS_FSETXATTR = 171 - SYS_GETXATTR = 172 - SYS_LGETXATTR = 173 - SYS_GETDENTS = 174 - SYS_SETSID = 175 - SYS_FCHDIR = 176 - SYS_FGETXATTR = 177 - SYS_LISTXATTR = 178 - SYS_LLISTXATTR = 179 - SYS_FLISTXATTR = 180 - SYS_REMOVEXATTR = 181 - SYS_LREMOVEXATTR = 182 - SYS_SIGPENDING = 183 - SYS_QUERY_MODULE = 184 - SYS_SETPGID = 185 - SYS_FREMOVEXATTR = 186 - SYS_TKILL = 187 - SYS_EXIT_GROUP = 188 - SYS_UNAME = 189 - SYS_INIT_MODULE = 190 - SYS_PERSONALITY = 191 - SYS_REMAP_FILE_PAGES = 192 - SYS_EPOLL_CREATE = 193 - 
SYS_EPOLL_CTL = 194 - SYS_EPOLL_WAIT = 195 - SYS_IOPRIO_SET = 196 - SYS_GETPPID = 197 - SYS_SIGACTION = 198 - SYS_SGETMASK = 199 - SYS_SSETMASK = 200 - SYS_SIGSUSPEND = 201 - SYS_OLDLSTAT = 202 - SYS_USELIB = 203 - SYS_READDIR = 204 - SYS_READAHEAD = 205 - SYS_SOCKETCALL = 206 - SYS_SYSLOG = 207 - SYS_LOOKUP_DCOOKIE = 208 - SYS_FADVISE64 = 209 - SYS_FADVISE64_64 = 210 - SYS_TGKILL = 211 - SYS_WAITPID = 212 - SYS_SWAPOFF = 213 - SYS_SYSINFO = 214 - SYS_IPC = 215 - SYS_SIGRETURN = 216 - SYS_CLONE = 217 - SYS_IOPRIO_GET = 218 - SYS_ADJTIMEX = 219 - SYS_SIGPROCMASK = 220 - SYS_CREATE_MODULE = 221 - SYS_DELETE_MODULE = 222 - SYS_GET_KERNEL_SYMS = 223 - SYS_GETPGID = 224 - SYS_BDFLUSH = 225 - SYS_SYSFS = 226 - SYS_AFS_SYSCALL = 227 - SYS_SETFSUID = 228 - SYS_SETFSGID = 229 - SYS__NEWSELECT = 230 - SYS_SPLICE = 232 - SYS_STIME = 233 - SYS_STATFS64 = 234 - SYS_FSTATFS64 = 235 - SYS__LLSEEK = 236 - SYS_MLOCK = 237 - SYS_MUNLOCK = 238 - SYS_MLOCKALL = 239 - SYS_MUNLOCKALL = 240 - SYS_SCHED_SETPARAM = 241 - SYS_SCHED_GETPARAM = 242 - SYS_SCHED_SETSCHEDULER = 243 - SYS_SCHED_GETSCHEDULER = 244 - SYS_SCHED_YIELD = 245 - SYS_SCHED_GET_PRIORITY_MAX = 246 - SYS_SCHED_GET_PRIORITY_MIN = 247 - SYS_SCHED_RR_GET_INTERVAL = 248 - SYS_NANOSLEEP = 249 - SYS_MREMAP = 250 - SYS__SYSCTL = 251 - SYS_GETSID = 252 - SYS_FDATASYNC = 253 - SYS_NFSSERVCTL = 254 - SYS_SYNC_FILE_RANGE = 255 - SYS_CLOCK_SETTIME = 256 - SYS_CLOCK_GETTIME = 257 - SYS_CLOCK_GETRES = 258 - SYS_CLOCK_NANOSLEEP = 259 - SYS_SCHED_GETAFFINITY = 260 - SYS_SCHED_SETAFFINITY = 261 - SYS_TIMER_SETTIME = 262 - SYS_TIMER_GETTIME = 263 - SYS_TIMER_GETOVERRUN = 264 - SYS_TIMER_DELETE = 265 - SYS_TIMER_CREATE = 266 - SYS_VSERVER = 267 - SYS_IO_SETUP = 268 - SYS_IO_DESTROY = 269 - SYS_IO_SUBMIT = 270 - SYS_IO_CANCEL = 271 - SYS_IO_GETEVENTS = 272 - SYS_MQ_OPEN = 273 - SYS_MQ_UNLINK = 274 - SYS_MQ_TIMEDSEND = 275 - SYS_MQ_TIMEDRECEIVE = 276 - SYS_MQ_NOTIFY = 277 - SYS_MQ_GETSETATTR = 278 - SYS_WAITID = 279 - SYS_TEE = 280 - 
SYS_ADD_KEY = 281 - SYS_REQUEST_KEY = 282 - SYS_KEYCTL = 283 - SYS_OPENAT = 284 - SYS_MKDIRAT = 285 - SYS_MKNODAT = 286 - SYS_FCHOWNAT = 287 - SYS_FUTIMESAT = 288 - SYS_FSTATAT64 = 289 - SYS_UNLINKAT = 290 - SYS_RENAMEAT = 291 - SYS_LINKAT = 292 - SYS_SYMLINKAT = 293 - SYS_READLINKAT = 294 - SYS_FCHMODAT = 295 - SYS_FACCESSAT = 296 - SYS_PSELECT6 = 297 - SYS_PPOLL = 298 - SYS_UNSHARE = 299 - SYS_SET_ROBUST_LIST = 300 - SYS_GET_ROBUST_LIST = 301 - SYS_MIGRATE_PAGES = 302 - SYS_MBIND = 303 - SYS_GET_MEMPOLICY = 304 - SYS_SET_MEMPOLICY = 305 - SYS_KEXEC_LOAD = 306 - SYS_MOVE_PAGES = 307 - SYS_GETCPU = 308 - SYS_EPOLL_PWAIT = 309 - SYS_UTIMENSAT = 310 - SYS_SIGNALFD = 311 - SYS_TIMERFD_CREATE = 312 - SYS_EVENTFD = 313 - SYS_FALLOCATE = 314 - SYS_TIMERFD_SETTIME = 315 - SYS_TIMERFD_GETTIME = 316 - SYS_SIGNALFD4 = 317 - SYS_EVENTFD2 = 318 - SYS_EPOLL_CREATE1 = 319 - SYS_DUP3 = 320 - SYS_PIPE2 = 321 - SYS_INOTIFY_INIT1 = 322 - SYS_ACCEPT4 = 323 - SYS_PREADV = 324 - SYS_PWRITEV = 325 - SYS_RT_TGSIGQUEUEINFO = 326 - SYS_PERF_EVENT_OPEN = 327 - SYS_RECVMMSG = 328 - SYS_FANOTIFY_INIT = 329 - SYS_FANOTIFY_MARK = 330 - SYS_PRLIMIT64 = 331 - SYS_NAME_TO_HANDLE_AT = 332 - SYS_OPEN_BY_HANDLE_AT = 333 - SYS_CLOCK_ADJTIME = 334 - SYS_SYNCFS = 335 - SYS_SENDMMSG = 336 - SYS_SETNS = 337 - SYS_PROCESS_VM_READV = 338 - SYS_PROCESS_VM_WRITEV = 339 - SYS_KERN_FEATURES = 340 - SYS_KCMP = 341 - SYS_FINIT_MODULE = 342 - SYS_SCHED_SETATTR = 343 - SYS_SCHED_GETATTR = 344 - SYS_RENAMEAT2 = 345 - SYS_SECCOMP = 346 - SYS_GETRANDOM = 347 - SYS_MEMFD_CREATE = 348 - SYS_BPF = 349 - SYS_EXECVEAT = 350 - SYS_MEMBARRIER = 351 - SYS_USERFAULTFD = 352 - SYS_BIND = 353 - SYS_LISTEN = 354 - SYS_SETSOCKOPT = 355 - SYS_MLOCK2 = 356 - SYS_COPY_FILE_RANGE = 357 - SYS_PREADV2 = 358 - SYS_PWRITEV2 = 359 - SYS_STATX = 360 - SYS_IO_PGETEVENTS = 361 - SYS_PKEY_MPROTECT = 362 - SYS_PKEY_ALLOC = 363 - SYS_PKEY_FREE = 364 - SYS_RSEQ = 365 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 
- SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 - SYS_OPEN_TREE = 428 - SYS_MOVE_MOUNT = 429 - SYS_FSOPEN = 430 - SYS_FSCONFIG = 431 - SYS_FSMOUNT = 432 - SYS_FSPICK = 433 - SYS_PIDFD_OPEN = 434 - SYS_CLOSE_RANGE = 436 - SYS_OPENAT2 = 437 - SYS_PIDFD_GETFD = 438 - SYS_FACCESSAT2 = 439 - SYS_PROCESS_MADVISE = 440 - SYS_EPOLL_PWAIT2 = 441 - SYS_MOUNT_SETATTR = 442 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAIT4 = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECV = 11 + SYS_CHDIR = 12 + SYS_CHOWN = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BRK = 17 + SYS_PERFCTR = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_CAPGET = 21 + SYS_CAPSET = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_VMSPLICE = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_SIGALTSTACK = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_STAT = 38 + SYS_SENDFILE = 39 + SYS_LSTAT = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_UMOUNT2 = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_MEMORY_ORDERING = 52 + SYS_IOCTL = 54 + SYS_REBOOT = 55 + SYS_SYMLINK = 57 + SYS_READLINK = 58 + SYS_EXECVE = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_FSTAT = 62 + SYS_FSTAT64 = 63 + SYS_GETPAGESIZE = 64 + SYS_MSYNC = 65 + SYS_VFORK = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_MMAP = 71 + SYS_MUNMAP = 73 + SYS_MPROTECT = 74 + SYS_MADVISE = 75 + SYS_VHANGUP = 76 + SYS_MINCORE = 78 + SYS_GETGROUPS = 79 + SYS_SETGROUPS = 80 + SYS_GETPGRP = 81 + SYS_SETITIMER = 83 + SYS_SWAPON = 85 + SYS_GETITIMER = 86 + SYS_SETHOSTNAME = 88 + SYS_DUP2 = 90 + SYS_FCNTL = 92 + SYS_SELECT = 93 + SYS_FSYNC 
= 95 + SYS_SETPRIORITY = 96 + SYS_SOCKET = 97 + SYS_CONNECT = 98 + SYS_ACCEPT = 99 + SYS_GETPRIORITY = 100 + SYS_RT_SIGRETURN = 101 + SYS_RT_SIGACTION = 102 + SYS_RT_SIGPROCMASK = 103 + SYS_RT_SIGPENDING = 104 + SYS_RT_SIGTIMEDWAIT = 105 + SYS_RT_SIGQUEUEINFO = 106 + SYS_RT_SIGSUSPEND = 107 + SYS_SETRESUID = 108 + SYS_GETRESUID = 109 + SYS_SETRESGID = 110 + SYS_GETRESGID = 111 + SYS_RECVMSG = 113 + SYS_SENDMSG = 114 + SYS_GETTIMEOFDAY = 116 + SYS_GETRUSAGE = 117 + SYS_GETSOCKOPT = 118 + SYS_GETCWD = 119 + SYS_READV = 120 + SYS_WRITEV = 121 + SYS_SETTIMEOFDAY = 122 + SYS_FCHOWN = 123 + SYS_FCHMOD = 124 + SYS_RECVFROM = 125 + SYS_SETREUID = 126 + SYS_SETREGID = 127 + SYS_RENAME = 128 + SYS_TRUNCATE = 129 + SYS_FTRUNCATE = 130 + SYS_FLOCK = 131 + SYS_LSTAT64 = 132 + SYS_SENDTO = 133 + SYS_SHUTDOWN = 134 + SYS_SOCKETPAIR = 135 + SYS_MKDIR = 136 + SYS_RMDIR = 137 + SYS_UTIMES = 138 + SYS_STAT64 = 139 + SYS_SENDFILE64 = 140 + SYS_GETPEERNAME = 141 + SYS_FUTEX = 142 + SYS_GETTID = 143 + SYS_GETRLIMIT = 144 + SYS_SETRLIMIT = 145 + SYS_PIVOT_ROOT = 146 + SYS_PRCTL = 147 + SYS_PCICONFIG_READ = 148 + SYS_PCICONFIG_WRITE = 149 + SYS_GETSOCKNAME = 150 + SYS_INOTIFY_INIT = 151 + SYS_INOTIFY_ADD_WATCH = 152 + SYS_POLL = 153 + SYS_GETDENTS64 = 154 + SYS_INOTIFY_RM_WATCH = 156 + SYS_STATFS = 157 + SYS_FSTATFS = 158 + SYS_UMOUNT = 159 + SYS_SCHED_SET_AFFINITY = 160 + SYS_SCHED_GET_AFFINITY = 161 + SYS_GETDOMAINNAME = 162 + SYS_SETDOMAINNAME = 163 + SYS_UTRAP_INSTALL = 164 + SYS_QUOTACTL = 165 + SYS_SET_TID_ADDRESS = 166 + SYS_MOUNT = 167 + SYS_USTAT = 168 + SYS_SETXATTR = 169 + SYS_LSETXATTR = 170 + SYS_FSETXATTR = 171 + SYS_GETXATTR = 172 + SYS_LGETXATTR = 173 + SYS_GETDENTS = 174 + SYS_SETSID = 175 + SYS_FCHDIR = 176 + SYS_FGETXATTR = 177 + SYS_LISTXATTR = 178 + SYS_LLISTXATTR = 179 + SYS_FLISTXATTR = 180 + SYS_REMOVEXATTR = 181 + SYS_LREMOVEXATTR = 182 + SYS_SIGPENDING = 183 + SYS_QUERY_MODULE = 184 + SYS_SETPGID = 185 + SYS_FREMOVEXATTR = 186 + SYS_TKILL = 187 + SYS_EXIT_GROUP = 
188 + SYS_UNAME = 189 + SYS_INIT_MODULE = 190 + SYS_PERSONALITY = 191 + SYS_REMAP_FILE_PAGES = 192 + SYS_EPOLL_CREATE = 193 + SYS_EPOLL_CTL = 194 + SYS_EPOLL_WAIT = 195 + SYS_IOPRIO_SET = 196 + SYS_GETPPID = 197 + SYS_SIGACTION = 198 + SYS_SGETMASK = 199 + SYS_SSETMASK = 200 + SYS_SIGSUSPEND = 201 + SYS_OLDLSTAT = 202 + SYS_USELIB = 203 + SYS_READDIR = 204 + SYS_READAHEAD = 205 + SYS_SOCKETCALL = 206 + SYS_SYSLOG = 207 + SYS_LOOKUP_DCOOKIE = 208 + SYS_FADVISE64 = 209 + SYS_FADVISE64_64 = 210 + SYS_TGKILL = 211 + SYS_WAITPID = 212 + SYS_SWAPOFF = 213 + SYS_SYSINFO = 214 + SYS_IPC = 215 + SYS_SIGRETURN = 216 + SYS_CLONE = 217 + SYS_IOPRIO_GET = 218 + SYS_ADJTIMEX = 219 + SYS_SIGPROCMASK = 220 + SYS_CREATE_MODULE = 221 + SYS_DELETE_MODULE = 222 + SYS_GET_KERNEL_SYMS = 223 + SYS_GETPGID = 224 + SYS_BDFLUSH = 225 + SYS_SYSFS = 226 + SYS_AFS_SYSCALL = 227 + SYS_SETFSUID = 228 + SYS_SETFSGID = 229 + SYS__NEWSELECT = 230 + SYS_SPLICE = 232 + SYS_STIME = 233 + SYS_STATFS64 = 234 + SYS_FSTATFS64 = 235 + SYS__LLSEEK = 236 + SYS_MLOCK = 237 + SYS_MUNLOCK = 238 + SYS_MLOCKALL = 239 + SYS_MUNLOCKALL = 240 + SYS_SCHED_SETPARAM = 241 + SYS_SCHED_GETPARAM = 242 + SYS_SCHED_SETSCHEDULER = 243 + SYS_SCHED_GETSCHEDULER = 244 + SYS_SCHED_YIELD = 245 + SYS_SCHED_GET_PRIORITY_MAX = 246 + SYS_SCHED_GET_PRIORITY_MIN = 247 + SYS_SCHED_RR_GET_INTERVAL = 248 + SYS_NANOSLEEP = 249 + SYS_MREMAP = 250 + SYS__SYSCTL = 251 + SYS_GETSID = 252 + SYS_FDATASYNC = 253 + SYS_NFSSERVCTL = 254 + SYS_SYNC_FILE_RANGE = 255 + SYS_CLOCK_SETTIME = 256 + SYS_CLOCK_GETTIME = 257 + SYS_CLOCK_GETRES = 258 + SYS_CLOCK_NANOSLEEP = 259 + SYS_SCHED_GETAFFINITY = 260 + SYS_SCHED_SETAFFINITY = 261 + SYS_TIMER_SETTIME = 262 + SYS_TIMER_GETTIME = 263 + SYS_TIMER_GETOVERRUN = 264 + SYS_TIMER_DELETE = 265 + SYS_TIMER_CREATE = 266 + SYS_VSERVER = 267 + SYS_IO_SETUP = 268 + SYS_IO_DESTROY = 269 + SYS_IO_SUBMIT = 270 + SYS_IO_CANCEL = 271 + SYS_IO_GETEVENTS = 272 + SYS_MQ_OPEN = 273 + SYS_MQ_UNLINK = 274 + SYS_MQ_TIMEDSEND = 
275 + SYS_MQ_TIMEDRECEIVE = 276 + SYS_MQ_NOTIFY = 277 + SYS_MQ_GETSETATTR = 278 + SYS_WAITID = 279 + SYS_TEE = 280 + SYS_ADD_KEY = 281 + SYS_REQUEST_KEY = 282 + SYS_KEYCTL = 283 + SYS_OPENAT = 284 + SYS_MKDIRAT = 285 + SYS_MKNODAT = 286 + SYS_FCHOWNAT = 287 + SYS_FUTIMESAT = 288 + SYS_FSTATAT64 = 289 + SYS_UNLINKAT = 290 + SYS_RENAMEAT = 291 + SYS_LINKAT = 292 + SYS_SYMLINKAT = 293 + SYS_READLINKAT = 294 + SYS_FCHMODAT = 295 + SYS_FACCESSAT = 296 + SYS_PSELECT6 = 297 + SYS_PPOLL = 298 + SYS_UNSHARE = 299 + SYS_SET_ROBUST_LIST = 300 + SYS_GET_ROBUST_LIST = 301 + SYS_MIGRATE_PAGES = 302 + SYS_MBIND = 303 + SYS_GET_MEMPOLICY = 304 + SYS_SET_MEMPOLICY = 305 + SYS_KEXEC_LOAD = 306 + SYS_MOVE_PAGES = 307 + SYS_GETCPU = 308 + SYS_EPOLL_PWAIT = 309 + SYS_UTIMENSAT = 310 + SYS_SIGNALFD = 311 + SYS_TIMERFD_CREATE = 312 + SYS_EVENTFD = 313 + SYS_FALLOCATE = 314 + SYS_TIMERFD_SETTIME = 315 + SYS_TIMERFD_GETTIME = 316 + SYS_SIGNALFD4 = 317 + SYS_EVENTFD2 = 318 + SYS_EPOLL_CREATE1 = 319 + SYS_DUP3 = 320 + SYS_PIPE2 = 321 + SYS_INOTIFY_INIT1 = 322 + SYS_ACCEPT4 = 323 + SYS_PREADV = 324 + SYS_PWRITEV = 325 + SYS_RT_TGSIGQUEUEINFO = 326 + SYS_PERF_EVENT_OPEN = 327 + SYS_RECVMMSG = 328 + SYS_FANOTIFY_INIT = 329 + SYS_FANOTIFY_MARK = 330 + SYS_PRLIMIT64 = 331 + SYS_NAME_TO_HANDLE_AT = 332 + SYS_OPEN_BY_HANDLE_AT = 333 + SYS_CLOCK_ADJTIME = 334 + SYS_SYNCFS = 335 + SYS_SENDMMSG = 336 + SYS_SETNS = 337 + SYS_PROCESS_VM_READV = 338 + SYS_PROCESS_VM_WRITEV = 339 + SYS_KERN_FEATURES = 340 + SYS_KCMP = 341 + SYS_FINIT_MODULE = 342 + SYS_SCHED_SETATTR = 343 + SYS_SCHED_GETATTR = 344 + SYS_RENAMEAT2 = 345 + SYS_SECCOMP = 346 + SYS_GETRANDOM = 347 + SYS_MEMFD_CREATE = 348 + SYS_BPF = 349 + SYS_EXECVEAT = 350 + SYS_MEMBARRIER = 351 + SYS_USERFAULTFD = 352 + SYS_BIND = 353 + SYS_LISTEN = 354 + SYS_SETSOCKOPT = 355 + SYS_MLOCK2 = 356 + SYS_COPY_FILE_RANGE = 357 + SYS_PREADV2 = 358 + SYS_PWRITEV2 = 359 + SYS_STATX = 360 + SYS_IO_PGETEVENTS = 361 + SYS_PKEY_MPROTECT = 362 + SYS_PKEY_ALLOC = 363 + 
SYS_PKEY_FREE = 364 + SYS_RSEQ = 365 + SYS_SEMTIMEDOP = 392 + SYS_SEMGET = 393 + SYS_SEMCTL = 394 + SYS_SHMGET = 395 + SYS_SHMCTL = 396 + SYS_SHMAT = 397 + SYS_SHMDT = 398 + SYS_MSGGET = 399 + SYS_MSGSND = 400 + SYS_MSGRCV = 401 + SYS_MSGCTL = 402 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 72887ab..93a64c1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -681,6 +681,16 @@ type NdMsg struct { Type uint8 } +const ( + ICMP_FILTER = 0x1 + + ICMPV6_FILTER = 0x1 + ICMPV6_FILTER_BLOCK = 0x1 + ICMPV6_FILTER_BLOCKOTHERS = 0x3 + ICMPV6_FILTER_PASS = 0x2 + ICMPV6_FILTER_PASSONLY = 0x4 +) + const ( SizeofSockFilter = 0x8 ) @@ -1001,7 +1011,7 @@ const ( PERF_COUNT_SW_EMULATION_FAULTS = 0x8 PERF_COUNT_SW_DUMMY = 0x9 PERF_COUNT_SW_BPF_OUTPUT = 0xa - PERF_COUNT_SW_MAX = 0xb + PERF_COUNT_SW_MAX = 0xc PERF_SAMPLE_IP = 0x1 PERF_SAMPLE_TID = 0x2 PERF_SAMPLE_TIME = 0x4 @@ -1773,6 +1783,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 @@ -3434,7 +3446,7 @@ const ( ETHTOOL_MSG_CABLE_TEST_ACT = 0x1a ETHTOOL_MSG_CABLE_TEST_TDR_ACT = 0x1b ETHTOOL_MSG_TUNNEL_INFO_GET = 0x1c - ETHTOOL_MSG_USER_MAX = 0x1c + ETHTOOL_MSG_USER_MAX = 0x20 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3465,7 +3477,7 @@ const ( 
ETHTOOL_MSG_CABLE_TEST_NTF = 0x1b ETHTOOL_MSG_CABLE_TEST_TDR_NTF = 0x1c ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY = 0x1d - ETHTOOL_MSG_KERNEL_MAX = 0x1d + ETHTOOL_MSG_KERNEL_MAX = 0x21 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 235c62e..72f2e96 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -170,6 +170,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 99b1e5b..d5f018d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -173,6 +173,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index cc8bba7..675446d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -176,6 +176,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index fa8fe3a..711d071 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index e7fb8d9..c1131c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 2fa61d5..91d5574 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7f36399..5d72149 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index f3c20cb..a5addd0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 885d279..bb6b03d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -176,6 +176,11 
@@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [16]byte +} + const ( SizeofSockaddrNFCLLCP = 0x58 SizeofIovec = 0x8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a94eb8e..7637243 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 659e32e..a1a28e5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -175,6 +175,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ab8ec60..e0a8a13 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -174,6 +174,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3ec0823..21d6e56 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -173,6 +173,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 23d4744..0531e98 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -177,6 +177,11 @@ type Cmsghdr struct { Type int32 } +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + const ( SizeofSockaddrNFCLLCP = 0x60 SizeofIovec = 0x10 diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 85effef..ad4aad2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -440,3 +440,43 @@ const ( POLLWRBAND = 0x100 POLLWRNORM = 0x4 ) + +type fileObj struct { + Atim Timespec + Mtim Timespec + Ctim Timespec + Pad [3]uint64 + Name *int8 +} + +type portEvent struct { + Events int32 + Source uint16 + Pad uint16 + Object uint64 + User *byte +} + +const ( + PORT_SOURCE_AIO = 0x1 + PORT_SOURCE_TIMER = 0x2 + PORT_SOURCE_USER = 0x3 + PORT_SOURCE_FD = 0x4 + PORT_SOURCE_ALERT = 0x5 + PORT_SOURCE_MQ = 0x6 + PORT_SOURCE_FILE = 0x7 + PORT_ALERT_SET = 0x1 + PORT_ALERT_UPDATE = 0x2 + PORT_ALERT_INVALID = 0x3 + FILE_ACCESS = 0x1 + FILE_MODIFIED = 0x2 + FILE_ATTRIB = 0x4 + FILE_TRUNC = 0x100000 + FILE_NOFOLLOW = 0x10000000 + FILE_DELETE = 0x10 + FILE_RENAME_TO = 0x20 + FILE_RENAME_FROM = 0x40 + UNMOUNTED = 0x20000000 + MOUNTEDOVER = 0x40000000 + FILE_EXCEPTION = 0x60000070 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1f73339..17f0331 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -680,7 +680,7 @@ const ( WTD_CHOICE_CERT = 5 WTD_STATEACTION_IGNORE = 0x00000000 - WTD_STATEACTION_VERIFY = 0x00000010 + WTD_STATEACTION_VERIFY = 0x00000001 WTD_STATEACTION_CLOSE = 0x00000002 WTD_STATEACTION_AUTO_CACHE = 0x00000003 WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004 diff --git 
a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 0000000..2683e4b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md new file mode 100644 index 0000000..08eb1ba --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. 
+ +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +and offers backwards +compatibility with YAML 1.1 in some cases. +1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v3*. + +To install it, run: + + go get gopkg.in/yaml.v3 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) + +API stability +------------- + +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 0000000..ae7d049 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. 
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. 
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. 
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. 
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. 
+// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 0000000..df36e3a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,950 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. 
+func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue 
= string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), 
value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. 
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. + isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + 
case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + d.merge(n.Content[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", 
k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + if n.Content[i].ShortTag() != strTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + d.merge(n.Content[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + 
inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *Node, out reflect.Value) { + switch n.Kind { + case MappingNode: + d.unmarshal(n, out) + case AliasNode: + if n.Alias != nil && n.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(n, out) + case SequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.Content) - 1; i >= 0; i-- { + ni := n.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 0000000..0f47c9c --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,2020 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + 
case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if 
yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. 
+ emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if 
emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if 
!yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. 
+ emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. 
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. 
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. 
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + if len(emitter.line_comment) == 0 { + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. 
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true 
+ } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i 
< len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value 
[]byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 
0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return 
false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += 
width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 0000000..de9e72a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, 
reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + 
yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. 
+ switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
+ implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. + if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, 
"") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod new file mode 100644 index 0000000..f407ea3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v3" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 0000000..ac66fcc --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1249 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? 
+// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. 
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators.
+ if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. 
+ var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { 
+ tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. 
+ end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, 
+ anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return 
yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. 
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. 
+ if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 0000000..b7de0a8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. 
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. 
Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. 
number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. 
+ if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 0000000..64ae888 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := 
range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. 
+ if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. 
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go new file mode 100644 index 0000000..ca00701 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -0,0 +1,3038 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. 
Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. 
We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). 
However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. 
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) 
+ parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. 
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' 
&& (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. 
+ //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. 
To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. 
+ parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. 
+ parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. 
+ block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. 
+ parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. 
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. 
+ var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. 
+ //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' 
{ + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. 
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. 
+ var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) 
+ } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. 
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. 
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. 
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. 
+ if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. 
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? 
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. 
+ var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. 
+ seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 0000000..9210ece --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 0000000..b8a116b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 0000000..8cec6da --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,698 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. 
Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. 
+// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. 
+// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. 
+func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. 
While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +// +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. 
+ Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. 
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := 
getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 0000000..7c6d007 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,807 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. 
+ yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. 
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return 
"yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+ yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). 
+ quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. 
+} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. 
+// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. 
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case 
yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. 
+ input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. 
+ marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? 
+ best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? 
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go new file mode 100644 index 0000000..e88f9c5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. 
+func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7f1105f..18ef89d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -34,8 +34,6 @@ github.com/goccy/go-yaml/parser github.com/goccy/go-yaml/printer github.com/goccy/go-yaml/scanner github.com/goccy/go-yaml/token -# github.com/gofrs/flock v0.7.1 -github.com/gofrs/flock # github.com/gofrs/uuid v3.3.0+incompatible github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 @@ -123,23 +121,24 @@ github.com/pgavlin/goldmark/util github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/pulumi/kube2pulumi v0.0.10 +# github.com/pulumi/kube2pulumi v0.0.11 ## explicit github.com/pulumi/kube2pulumi/pkg/kube2pulumi github.com/pulumi/kube2pulumi/pkg/pcl2pulumi github.com/pulumi/kube2pulumi/pkg/yaml2pcl -# github.com/pulumi/pulumi/pkg/v3 v3.4.0 +# github.com/pulumi/pulumi/pkg/v3 v3.24.1 github.com/pulumi/pulumi/pkg/v3/codegen github.com/pulumi/pulumi/pkg/v3/codegen/dotnet github.com/pulumi/pulumi/pkg/v3/codegen/go -github.com/pulumi/pulumi/pkg/v3/codegen/hcl2 github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/model/format github.com/pulumi/pulumi/pkg/v3/codegen/hcl2/syntax +github.com/pulumi/pulumi/pkg/v3/codegen/internal/tstypes github.com/pulumi/pulumi/pkg/v3/codegen/nodejs +github.com/pulumi/pulumi/pkg/v3/codegen/pcl github.com/pulumi/pulumi/pkg/v3/codegen/python github.com/pulumi/pulumi/pkg/v3/codegen/schema -# github.com/pulumi/pulumi/sdk/v3 v3.4.0 +# github.com/pulumi/pulumi/sdk/v3 v3.24.1 github.com/pulumi/pulumi/sdk/v3/go/common/apitype github.com/pulumi/pulumi/sdk/v3/go/common/diag github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors @@ -166,8 +165,17 @@ github.com/pulumi/pulumi/sdk/v3/go/common/workspace 
github.com/pulumi/pulumi/sdk/v3/nodejs/npm github.com/pulumi/pulumi/sdk/v3/proto/go github.com/pulumi/pulumi/sdk/v3/python +# github.com/rivo/uniseg v0.2.0 +github.com/rivo/uniseg +# github.com/rogpeppe/go-internal v1.8.1 +github.com/rogpeppe/go-internal/internal/syscall/windows +github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll +github.com/rogpeppe/go-internal/lockedfile +github.com/rogpeppe/go-internal/lockedfile/internal/filelock # github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 github.com/sabhiram/go-gitignore +# github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 +github.com/santhosh-tekuri/jsonschema/v5 # github.com/sergi/go-diff v1.1.0 github.com/sergi/go-diff/diffmatchpatch # github.com/spf13/afero v1.6.0 @@ -257,8 +265,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 -## explicit +# golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 @@ -413,6 +420,8 @@ gopkg.in/src-d/go-git.v4/utils/merkletrie/noder gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 +# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +gopkg.in/yaml.v3 # sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 sourcegraph.com/sourcegraph/appdash sourcegraph.com/sourcegraph/appdash/internal/wire