From 1cff136aed8784a4cc24065b5a60e3d9cea2c242 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 12:31:38 +0000
Subject: [PATCH] Bump github.com/hashicorp/nomad from 1.4.6 to 1.4.11

Bumps [github.com/hashicorp/nomad](https://github.com/hashicorp/nomad) from 1.4.6 to 1.4.11.
- [Release notes](https://github.com/hashicorp/nomad/releases)
- [Changelog](https://github.com/hashicorp/nomad/blob/main/CHANGELOG-unsupported.md)
- [Commits](https://github.com/hashicorp/nomad/compare/v1.4.6...v1.4.11)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/nomad
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
---
 go.mod | 53 +-
 go.sum | 611 +-
 .../pkg/security/grantvmgroupaccess.go | 160 -
 .../go-winio/pkg/security/syscall_windows.go | 7 -
 .../go-winio/pkg/security/zsyscall_windows.go | 70 -
 .../github.com/Microsoft/go-winio/vhd/vhd.go | 350 -
 .../Microsoft/go-winio/vhd/zvhd_windows.go | 106 -
 .../Microsoft/hcsshim/.gitattributes | 1 -
 .../github.com/Microsoft/hcsshim/.gitignore | 38 -
 .../Microsoft/hcsshim/.golangci.yml | 99 -
 .../github.com/Microsoft/hcsshim/CODEOWNERS | 1 -
 vendor/github.com/Microsoft/hcsshim/LICENSE | 21 -
 vendor/github.com/Microsoft/hcsshim/Makefile | 87 -
 .../Microsoft/hcsshim/Protobuild.toml | 49 -
 vendor/github.com/Microsoft/hcsshim/README.md | 120 -
 .../hcsshim/computestorage/attach.go | 38 -
 .../hcsshim/computestorage/destroy.go | 26 -
 .../hcsshim/computestorage/detach.go | 26 -
 .../hcsshim/computestorage/export.go | 46 -
 .../hcsshim/computestorage/format.go | 26 -
 .../hcsshim/computestorage/helpers.go | 193 -
 .../hcsshim/computestorage/import.go | 41 -
 .../hcsshim/computestorage/initialize.go | 38 -
 .../Microsoft/hcsshim/computestorage/mount.go | 27 -
 .../Microsoft/hcsshim/computestorage/setup.go | 74 -
 .../hcsshim/computestorage/storage.go | 50 -
 .../computestorage/zsyscall_windows.go | 319 -
 .../github.com/Microsoft/hcsshim/container.go | 223 -
 vendor/github.com/Microsoft/hcsshim/errors.go | 245 -
 .../Microsoft/hcsshim/functional_tests.ps1 | 12 -
 .../github.com/Microsoft/hcsshim/hcsshim.go | 28 -
 .../Microsoft/hcsshim/hnsendpoint.go | 118 -
 .../Microsoft/hcsshim/hnsglobals.go | 16 -
 .../Microsoft/hcsshim/hnsnetwork.go | 36 -
 .../github.com/Microsoft/hcsshim/hnspolicy.go | 60 -
 .../Microsoft/hcsshim/hnspolicylist.go | 47 -
 .../Microsoft/hcsshim/hnssupport.go | 13 -
 .../github.com/Microsoft/hcsshim/interface.go | 114 -
 .../Microsoft/hcsshim/internal/cow/cow.go | 97 -
 .../hcsshim/internal/hcs/callback.go | 161 -
 .../Microsoft/hcsshim/internal/hcs/errors.go | 343 -
 .../Microsoft/hcsshim/internal/hcs/process.go | 589 --
 .../hcsshim/internal/hcs/schema1/schema1.go | 250 -
 .../internal/hcs/schema2/attachment.go | 36 -
 .../hcsshim/internal/hcs/schema2/battery.go | 13 -
 .../hcs/schema2/cache_query_stats_response.go | 18 -
 .../hcsshim/internal/hcs/schema2/chipset.go | 27 -
 .../internal/hcs/schema2/close_handle.go | 14 -
 .../hcsshim/internal/hcs/schema2/com_port.go | 17 -
 .../internal/hcs/schema2/compute_system.go | 26 -
 .../internal/hcs/schema2/configuration.go | 72 -
 .../internal/hcs/schema2/console_size.go | 16 -
 .../hcsshim/internal/hcs/schema2/container.go | 36 -
 ...r_credential_guard_add_instance_request.go | 16 -
 ...edential_guard_hv_socket_service_config.go | 15 -
 .../container_credential_guard_instance.go | 16 -
 ...ainer_credential_guard_modify_operation.go | 17 -
 ...iner_credential_guard_operation_request.go | 15 -
 ...redential_guard_remove_instance_request.go | 14 -
.../container_credential_guard_state.go | 25 - .../container_credential_guard_system_info.go | 14 - .../schema2/container_memory_information.go | 25 - .../hcsshim/internal/hcs/schema2/cpu_group.go | 15 - .../hcs/schema2/cpu_group_affinity.go | 15 - .../internal/hcs/schema2/cpu_group_config.go | 18 - .../hcs/schema2/cpu_group_configurations.go | 15 - .../hcs/schema2/cpu_group_operations.go | 18 - .../hcs/schema2/cpu_group_property.go | 15 - .../hcs/schema2/create_group_operation.go | 17 - .../hcs/schema2/delete_group_operation.go | 15 - .../hcsshim/internal/hcs/schema2/device.go | 27 - .../hcsshim/internal/hcs/schema2/devices.go | 46 - .../hcs/schema2/enhanced_mode_video.go | 14 - .../hcs/schema2/flexible_io_device.go | 18 - .../internal/hcs/schema2/guest_connection.go | 19 - .../hcs/schema2/guest_connection_info.go | 21 - .../hcs/schema2/guest_crash_reporting.go | 14 - .../hcsshim/internal/hcs/schema2/guest_os.go | 14 - .../internal/hcs/schema2/guest_state.go | 22 - .../schema2/host_processor_modify_request.go | 16 - .../internal/hcs/schema2/hosted_system.go | 16 - .../hcsshim/internal/hcs/schema2/hv_socket.go | 16 - .../internal/hcs/schema2/hv_socket_2.go | 15 - .../internal/hcs/schema2/hv_socket_address.go | 17 - .../hcs/schema2/hv_socket_service_config.go | 28 - .../hcs/schema2/hv_socket_system_config.go | 22 - .../hcs/schema2/interrupt_moderation_mode.go | 42 - .../internal/hcs/schema2/iov_settings.go | 22 - .../hcsshim/internal/hcs/schema2/keyboard.go | 13 - .../hcsshim/internal/hcs/schema2/layer.go | 21 - .../hcs/schema2/linux_kernel_direct.go | 18 - .../internal/hcs/schema2/logical_processor.go | 18 - .../internal/hcs/schema2/mapped_directory.go | 20 - .../internal/hcs/schema2/mapped_pipe.go | 18 - .../hcsshim/internal/hcs/schema2/memory.go | 14 - .../hcsshim/internal/hcs/schema2/memory_2.go | 49 - .../hcs/schema2/memory_information_for_vm.go | 18 - .../internal/hcs/schema2/memory_stats.go | 19 - .../model_container_definition_device.go | 14 - .../hcs/schema2/model_device_category.go | 15 - .../hcs/schema2/model_device_extension.go | 15 - .../hcs/schema2/model_device_instance.go | 17 - .../hcs/schema2/model_device_namespace.go | 16 - .../hcs/schema2/model_interface_class.go | 16 - .../internal/hcs/schema2/model_namespace.go | 15 - .../hcs/schema2/model_object_directory.go | 18 - .../hcs/schema2/model_object_namespace.go | 16 - .../hcs/schema2/model_object_symlink.go | 18 - .../hcs/schema2/modification_request.go | 15 - .../hcs/schema2/modify_setting_request.go | 20 - .../hcsshim/internal/hcs/schema2/mouse.go | 13 - .../internal/hcs/schema2/network_adapter.go | 17 - .../internal/hcs/schema2/networking.go | 23 - .../hcs/schema2/pause_notification.go | 15 - .../internal/hcs/schema2/pause_options.go | 17 - .../hcsshim/internal/hcs/schema2/plan9.go | 14 - .../internal/hcs/schema2/plan9_share.go | 34 - .../internal/hcs/schema2/process_details.go | 33 - .../hcs/schema2/process_modify_request.go | 19 - .../hcs/schema2/process_parameters.go | 46 - .../internal/hcs/schema2/process_status.go | 21 - .../hcsshim/internal/hcs/schema2/processor.go | 18 - .../internal/hcs/schema2/processor_2.go | 23 - .../internal/hcs/schema2/processor_stats.go | 19 - .../hcs/schema2/processor_topology.go | 15 - .../internal/hcs/schema2/properties.go | 54 - .../internal/hcs/schema2/property_query.go | 15 - .../internal/hcs/schema2/property_type.go | 26 - .../hcs/schema2/rdp_connection_options.go | 16 - .../internal/hcs/schema2/registry_changes.go | 16 - .../internal/hcs/schema2/registry_key.go | 18 - 
.../internal/hcs/schema2/registry_value.go | 30 - .../internal/hcs/schema2/restore_state.go | 19 - .../internal/hcs/schema2/save_options.go | 19 - .../hcsshim/internal/hcs/schema2/scsi.go | 16 - .../hcs/schema2/service_properties.go | 18 - .../schema2/shared_memory_configuration.go | 14 - .../hcs/schema2/shared_memory_region.go | 22 - .../hcs/schema2/shared_memory_region_info.go | 16 - .../internal/hcs/schema2/silo_properties.go | 17 - .../internal/hcs/schema2/statistics.go | 29 - .../hcsshim/internal/hcs/schema2/storage.go | 21 - .../internal/hcs/schema2/storage_qo_s.go | 16 - .../internal/hcs/schema2/storage_stats.go | 21 - .../hcsshim/internal/hcs/schema2/topology.go | 16 - .../hcsshim/internal/hcs/schema2/uefi.go | 20 - .../internal/hcs/schema2/uefi_boot_entry.go | 22 - .../hcsshim/internal/hcs/schema2/version.go | 16 - .../internal/hcs/schema2/video_monitor.go | 18 - .../internal/hcs/schema2/virtual_machine.go | 32 - .../internal/hcs/schema2/virtual_node_info.go | 20 - .../hcs/schema2/virtual_p_mem_controller.go | 20 - .../hcs/schema2/virtual_p_mem_device.go | 18 - .../hcs/schema2/virtual_p_mem_mapping.go | 15 - .../hcs/schema2/virtual_pci_device.go | 16 - .../hcs/schema2/virtual_pci_function.go | 18 - .../internal/hcs/schema2/virtual_smb.go | 16 - .../internal/hcs/schema2/virtual_smb_share.go | 20 - .../hcs/schema2/virtual_smb_share_options.go | 62 - .../hcsshim/internal/hcs/schema2/vm_memory.go | 26 - .../hcs/schema2/vm_processor_limits.go | 22 - .../hcs/schema2/windows_crash_reporting.go | 16 - .../Microsoft/hcsshim/internal/hcs/service.go | 49 - .../Microsoft/hcsshim/internal/hcs/system.go | 815 --- .../Microsoft/hcsshim/internal/hcs/utils.go | 62 - .../hcsshim/internal/hcs/waithelper.go | 68 - .../hcsshim/internal/hcserror/hcserror.go | 47 - .../Microsoft/hcsshim/internal/hns/hns.go | 23 - .../hcsshim/internal/hns/hnsendpoint.go | 338 - .../hcsshim/internal/hns/hnsfuncs.go | 49 - .../hcsshim/internal/hns/hnsglobals.go | 28 - .../hcsshim/internal/hns/hnsnetwork.go | 141 - .../hcsshim/internal/hns/hnspolicy.go | 110 - .../hcsshim/internal/hns/hnspolicylist.go | 201 - .../hcsshim/internal/hns/hnssupport.go | 49 - .../hcsshim/internal/hns/namespace.go | 111 - .../hcsshim/internal/hns/zsyscall_windows.go | 76 - .../hcsshim/internal/interop/interop.go | 23 - .../internal/interop/zsyscall_windows.go | 48 - .../hcsshim/internal/jobobject/iocp.go | 111 - .../hcsshim/internal/jobobject/jobobject.go | 538 -- .../hcsshim/internal/jobobject/limits.go | 315 - .../Microsoft/hcsshim/internal/log/g.go | 23 - .../hcsshim/internal/logfields/fields.go | 32 - .../hcsshim/internal/longpath/longpath.go | 24 - .../hcsshim/internal/mergemaps/merge.go | 52 - .../Microsoft/hcsshim/internal/oc/exporter.go | 43 - .../Microsoft/hcsshim/internal/oc/span.go | 17 - .../Microsoft/hcsshim/internal/queue/mq.go | 92 - .../hcsshim/internal/safefile/safeopen.go | 375 - .../hcsshim/internal/timeout/timeout.go | 74 - .../hcsshim/internal/vmcompute/vmcompute.go | 610 -- .../internal/vmcompute/zsyscall_windows.go | 581 -- .../hcsshim/internal/wclayer/activatelayer.go | 27 - .../hcsshim/internal/wclayer/baselayer.go | 182 - .../hcsshim/internal/wclayer/createlayer.go | 27 - .../internal/wclayer/createscratchlayer.go | 34 - .../internal/wclayer/deactivatelayer.go | 24 - .../hcsshim/internal/wclayer/destroylayer.go | 25 - .../internal/wclayer/expandscratchsize.go | 140 - .../hcsshim/internal/wclayer/exportlayer.go | 94 - .../internal/wclayer/getlayermountpath.go | 50 - .../internal/wclayer/getsharedbaseimages.go | 29 - 
.../hcsshim/internal/wclayer/grantvmaccess.go | 26 - .../hcsshim/internal/wclayer/importlayer.go | 166 - .../hcsshim/internal/wclayer/layerexists.go | 28 - .../hcsshim/internal/wclayer/layerid.go | 22 - .../hcsshim/internal/wclayer/layerutils.go | 97 - .../hcsshim/internal/wclayer/legacy.go | 811 --- .../hcsshim/internal/wclayer/nametoguid.go | 29 - .../hcsshim/internal/wclayer/preparelayer.go | 44 - .../hcsshim/internal/wclayer/processimage.go | 41 - .../internal/wclayer/unpreparelayer.go | 25 - .../hcsshim/internal/wclayer/wclayer.go | 35 - .../internal/wclayer/zsyscall_windows.go | 569 -- .../hcsshim/internal/winapi/console.go | 44 - .../hcsshim/internal/winapi/devices.go | 13 - .../hcsshim/internal/winapi/errors.go | 15 - .../hcsshim/internal/winapi/filesystem.go | 110 - .../hcsshim/internal/winapi/jobobject.go | 218 - .../hcsshim/internal/winapi/logon.go | 30 - .../hcsshim/internal/winapi/memory.go | 4 - .../Microsoft/hcsshim/internal/winapi/net.go | 3 - .../Microsoft/hcsshim/internal/winapi/path.go | 11 - .../hcsshim/internal/winapi/process.go | 65 - .../hcsshim/internal/winapi/processor.go | 7 - .../hcsshim/internal/winapi/system.go | 53 - .../hcsshim/internal/winapi/thread.go | 12 - .../hcsshim/internal/winapi/utils.go | 80 - .../hcsshim/internal/winapi/winapi.go | 5 - .../internal/winapi/zsyscall_windows.go | 354 - vendor/github.com/Microsoft/hcsshim/layer.go | 107 - .../hcsshim/osversion/osversion_windows.go | 50 - .../osversion/platform_compat_windows.go | 35 - .../hcsshim/osversion/windowsbuilds.go | 58 - .../github.com/Microsoft/hcsshim/process.go | 98 - .../Microsoft/hcsshim/zsyscall_windows.go | 54 - .../containerd/cgroups/stats/v1/doc.go | 17 - .../containerd/cgroups/stats/v1/metrics.pb.go | 6125 ----------------- .../cgroups/stats/v1/metrics.pb.txt | 790 --- .../containerd/cgroups/stats/v1/metrics.proto | 158 - .../containerd/containerd/sys/epoll.go | 34 - .../containerd/containerd/sys/fds.go | 35 - .../containerd/containerd/sys/filesys_unix.go | 32 - .../containerd/sys/filesys_windows.go | 345 - .../containerd/containerd/sys/oom_linux.go | 82 - .../containerd/sys/oom_unsupported.go | 49 - .../containerd/containerd/sys/socket_unix.go | 81 - .../containerd/sys/socket_windows.go | 30 - .../containerd/sys/userns_deprecated.go | 23 - vendor/github.com/docker/docker/AUTHORS | 283 +- .../docker/api/types/container/config.go | 27 + .../api/types/container/container_create.go | 20 - .../api/types/container/container_wait.go | 28 - .../api/types/container/create_response.go | 19 + .../docker/api/types/container/deprecated.go | 16 + .../docker/api/types/container/host_config.go | 92 +- .../api/types/container/wait_exit_error.go | 12 + .../api/types/container/wait_response.go | 18 + .../docker/docker/api/types/filters/parse.go | 9 +- .../docker/docker/api/types/mount/mount.go | 19 +- .../docker/docker/api/types/swarm/common.go | 10 +- .../docker/docker/api/types/swarm/node.go | 24 + .../docker/docker/api/types/swarm/swarm.go | 10 + .../docker/docker/api/types/swarm/task.go | 19 + .../docker/api/types/versions/compare.go | 3 + .../libnetwork/resolvconf/resolvconf.go | 106 +- .../docker/libnetwork/resolvconf/utils.go | 16 + .../docker/docker/pkg/archive/archive.go | 312 +- .../docker/pkg/archive/archive_linux.go | 2 +- .../docker/docker/pkg/archive/archive_unix.go | 33 +- .../docker/docker/pkg/archive/changes.go | 6 +- .../docker/docker/pkg/archive/copy.go | 8 +- .../docker/docker/pkg/archive/diff.go | 33 +- .../docker/docker/pkg/archive/diff_unix.go | 22 + 
.../docker/docker/pkg/archive/diff_windows.go | 6 + .../docker/docker/pkg/fileutils/deprecated.go | 44 + .../docker/docker/pkg/fileutils/fileutils.go | 224 - .../docker/pkg/homedir/homedir_linux.go | 9 + .../docker/pkg/homedir/homedir_others.go | 5 + .../docker/docker/pkg/idtools/idtools.go | 54 +- .../docker/docker/pkg/idtools/idtools_unix.go | 45 +- .../docker/pkg/idtools/idtools_windows.go | 9 + .../docker/pkg/idtools/usergroupadd_linux.go | 1 - .../docker/docker/pkg/ioutils/bytespipe.go | 30 +- .../docker/docker/pkg/ioutils/readers.go | 16 +- .../docker/docker/pkg/system/exitcode.go | 19 - .../docker/docker/pkg/system/filesys.go | 19 + .../docker/pkg/system/filesys_deprecated.go | 35 + .../docker/docker/pkg/system/filesys_unix.go | 52 +- .../docker/pkg/system/filesys_windows.go | 174 - .../docker/docker/pkg/system/image_os.go | 10 + .../docker/docker/pkg/system/init_windows.go | 21 +- .../docker/docker/pkg/system/lcow.go | 49 - .../docker/pkg/system/lcow_unsupported.go | 29 - .../docker/docker/pkg/system/meminfo_linux.go | 6 +- .../docker/docker/pkg/system/path.go | 33 +- .../docker/docker/pkg/system/path_unix.go | 6 + .../docker/docker/pkg/system/path_windows.go | 23 +- .../docker/docker/pkg/system/process_unix.go | 1 + .../github.com/docker/docker/pkg/system/rm.go | 79 - .../docker/docker/pkg/system/rm_windows.go | 6 - .../docker/docker/pkg/system/stat_windows.go | 4 +- .../docker/docker/pkg/system/syscall_unix.go | 12 - .../docker/pkg/system/syscall_windows.go | 136 - .../docker/docker/pkg/system/umask.go | 14 - .../docker/docker/pkg/system/umask_windows.go | 7 - .../docker/libnetwork/resolvconf/README.md | 1 - .../libnetwork/resolvconf/dns/resolvconf.go | 26 - .../docker/libnetwork/types/types.go | 649 -- vendor/github.com/fatih/color/README.md | 14 +- vendor/github.com/fatih/color/color.go | 46 +- .../github.com/fatih/color/color_windows.go | 19 + vendor/github.com/fatih/color/doc.go | 137 +- .../gogo/protobuf/gogoproto/Makefile | 37 - .../github.com/gogo/protobuf/gogoproto/doc.go | 169 - .../gogo/protobuf/gogoproto/gogo.pb.go | 874 --- .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 - .../gogo/protobuf/gogoproto/gogo.proto | 144 - .../gogo/protobuf/gogoproto/helper.go | 415 -- .../protoc-gen-gogo/descriptor/Makefile | 36 - .../protoc-gen-gogo/descriptor/descriptor.go | 118 - .../descriptor/descriptor.pb.go | 2865 -------- .../descriptor/descriptor_gostring.gen.go | 752 -- .../protoc-gen-gogo/descriptor/helper.go | 390 -- .../gojuno/minimock/v3/.goreleaser.yml | 34 - .../github.com/gojuno/minimock/v3/.travis.yml | 21 - vendor/github.com/gojuno/minimock/v3/LICENSE | 21 - vendor/github.com/gojuno/minimock/v3/Makefile | 26 - .../github.com/gojuno/minimock/v3/README.md | 198 - .../gojuno/minimock/v3/camel_to_snake.go | 62 - vendor/github.com/gojuno/minimock/v3/doc.go | 264 - vendor/github.com/gojuno/minimock/v3/equal.go | 63 - vendor/github.com/gojuno/minimock/v3/logo.svg | 94 - .../gojuno/minimock/v3/mock_controller.go | 77 - .../gojuno/minimock/v3/safe_tester.go | 52 - .../github.com/gojuno/minimock/v3/template.go | 325 - vendor/github.com/golang/groupcache/LICENSE | 191 - .../github.com/golang/groupcache/lru/lru.go | 133 - vendor/github.com/golang/snappy/.gitignore | 16 - vendor/github.com/golang/snappy/AUTHORS | 18 - vendor/github.com/golang/snappy/CONTRIBUTORS | 41 - vendor/github.com/golang/snappy/README | 107 - .../github.com/golang/snappy/decode_amd64.s | 490 -- .../github.com/golang/snappy/decode_arm64.s | 494 -- vendor/github.com/golang/snappy/decode_asm.go | 15 - 
.../github.com/golang/snappy/encode_amd64.s | 730 -- .../github.com/golang/snappy/encode_arm64.s | 722 -- vendor/github.com/golang/snappy/encode_asm.go | 30 - .../github.com/gorilla/websocket/.gitignore | 25 - vendor/github.com/gorilla/websocket/AUTHORS | 9 - vendor/github.com/gorilla/websocket/LICENSE | 22 - vendor/github.com/gorilla/websocket/README.md | 39 - vendor/github.com/gorilla/websocket/client.go | 422 -- .../gorilla/websocket/compression.go | 148 - vendor/github.com/gorilla/websocket/conn.go | 1230 ---- vendor/github.com/gorilla/websocket/doc.go | 227 - vendor/github.com/gorilla/websocket/join.go | 42 - vendor/github.com/gorilla/websocket/json.go | 60 - vendor/github.com/gorilla/websocket/mask.go | 55 - .../github.com/gorilla/websocket/mask_safe.go | 16 - .../github.com/gorilla/websocket/prepared.go | 102 - vendor/github.com/gorilla/websocket/proxy.go | 77 - vendor/github.com/gorilla/websocket/server.go | 365 - .../gorilla/websocket/tls_handshake.go | 21 - .../gorilla/websocket/tls_handshake_116.go | 21 - vendor/github.com/gorilla/websocket/util.go | 283 - .../gorilla/websocket/x_net_proxy.go | 473 -- .../go-grpc-middleware/retry/doc.go | 2 +- .../go-grpc-middleware/retry/retry.go | 14 +- .../util/metautils/nicemd.go | 6 +- .../hashicorp/consul-template/config/auth.go | 141 - .../consul-template/config/config.go | 714 -- .../consul-template/config/consul.go | 205 - .../consul-template/config/convert.go | 210 - .../hashicorp/consul-template/config/dedup.go | 150 - .../config/default_delimiters.go | 53 - .../hashicorp/consul-template/config/env.go | 270 - .../hashicorp/consul-template/config/exec.go | 220 - .../consul-template/config/logfile.go | 127 - .../consul-template/config/mapstructure.go | 78 - .../hashicorp/consul-template/config/nomad.go | 193 - .../hashicorp/consul-template/config/retry.go | 170 - .../hashicorp/consul-template/config/ssl.go | 153 - .../consul-template/config/syslog.go | 106 - .../consul-template/config/template.go | 556 -- .../consul-template/config/transport.go | 198 - .../hashicorp/consul-template/config/vault.go | 435 -- .../hashicorp/consul-template/config/wait.go | 191 - .../dependency/catalog_datacenters.go | 112 - .../dependency/catalog_node.go | 181 - .../dependency/catalog_nodes.go | 150 - .../dependency/catalog_service.go | 154 - .../dependency/catalog_services.go | 124 - .../consul-template/dependency/client_set.go | 556 -- .../consul-template/dependency/connect_ca.go | 69 - .../dependency/connect_leaf.go | 74 - .../consul-template/dependency/dependency.go | 231 - .../consul-template/dependency/errors.go | 13 - .../consul-template/dependency/file.go | 129 - .../dependency/health_service.go | 274 - .../consul-template/dependency/kv_get.go | 115 - .../consul-template/dependency/kv_keys.go | 104 - .../consul-template/dependency/kv_list.go | 133 - .../dependency/nomad_service.go | 182 - .../dependency/nomad_services.go | 134 - .../dependency/nomad_var_get.go | 133 - .../dependency/nomad_var_list.go | 136 - .../dependency/nomad_var_structs.go | 153 - .../consul-template/dependency/set.go | 72 - .../dependency/vault_agent_token.go | 117 - .../dependency/vault_common.go | 372 - .../consul-template/dependency/vault_list.go | 153 - .../consul-template/dependency/vault_pki.go | 234 - .../consul-template/dependency/vault_read.go | 199 - .../consul-template/dependency/vault_token.go | 79 - .../consul-template/dependency/vault_write.go | 176 - .../consul-template/version/version.go | 16 - .../cgroups => hashicorp/cronexpr}/LICENSE | 1 + 
.../hashicorp/go-hclog/intlogger.go | 6 +- .../github.com/hashicorp/go-hclog/logger.go | 3 + .../hashicorp/go-hclog/nulllogger.go | 2 + .../hashicorp/go-plugin/CHANGELOG.md | 10 + .../github.com/hashicorp/go-plugin/README.md | 5 +- .../hashicorp/go-plugin/grpc_client.go | 2 +- .../internal/plugin/grpc_broker.pb.go | 2 +- .../internal/plugin/grpc_controller.pb.go | 2 +- .../internal/plugin/grpc_stdio.pb.go | 2 +- .../hashicorp/go-plugin/notes_unix.go | 3 + .../hashicorp/go-plugin/notes_windows.go | 3 + .../hashicorp/go-secure-stdlib/mlock/mlock.go | 15 - .../go-secure-stdlib/mlock/mlock_unavail.go | 13 - .../go-secure-stdlib/mlock/mlock_unix.go | 18 - .../github.com/hashicorp/go-uuid/.travis.yml | 12 - vendor/github.com/hashicorp/go-uuid/LICENSE | 365 - vendor/github.com/hashicorp/go-uuid/README.md | 8 - vendor/github.com/hashicorp/go-uuid/uuid.go | 83 - vendor/github.com/hashicorp/nomad/acl/acl.go | 4 + .../github.com/hashicorp/nomad/acl/policy.go | 64 +- vendor/github.com/hashicorp/nomad/api/LICENSE | 363 - vendor/github.com/hashicorp/nomad/api/acl.go | 477 -- .../github.com/hashicorp/nomad/api/agent.go | 571 -- .../hashicorp/nomad/api/allocations.go | 563 -- .../hashicorp/nomad/api/allocations_exec.go | 246 - vendor/github.com/hashicorp/nomad/api/api.go | 1143 --- .../hashicorp/nomad/api/constraint.go | 30 - .../github.com/hashicorp/nomad/api/consul.go | 607 -- .../hashicorp/nomad/api/contexts/contexts.go | 34 - vendor/github.com/hashicorp/nomad/api/csi.go | 597 -- .../hashicorp/nomad/api/deployments.go | 297 - .../hashicorp/nomad/api/evaluations.go | 150 - .../hashicorp/nomad/api/event_stream.go | 204 - vendor/github.com/hashicorp/nomad/api/fs.go | 404 -- .../github.com/hashicorp/nomad/api/ioutil.go | 81 - vendor/github.com/hashicorp/nomad/api/jobs.go | 1392 ---- .../github.com/hashicorp/nomad/api/keyring.go | 88 - .../hashicorp/nomad/api/namespace.go | 98 - .../github.com/hashicorp/nomad/api/nodes.go | 959 --- .../hashicorp/nomad/api/operator.go | 361 - .../hashicorp/nomad/api/operator_autopilot.go | 222 - .../hashicorp/nomad/api/operator_metrics.go | 87 - .../github.com/hashicorp/nomad/api/quota.go | 191 - vendor/github.com/hashicorp/nomad/api/raw.go | 38 - .../hashicorp/nomad/api/recommendations.go | 123 - .../github.com/hashicorp/nomad/api/regions.go | 24 - .../hashicorp/nomad/api/resources.go | 277 - .../github.com/hashicorp/nomad/api/scaling.go | 119 - .../github.com/hashicorp/nomad/api/search.go | 97 - .../hashicorp/nomad/api/sentinel.go | 81 - .../hashicorp/nomad/api/services.go | 317 - .../github.com/hashicorp/nomad/api/status.go | 43 - .../github.com/hashicorp/nomad/api/system.go | 23 - .../github.com/hashicorp/nomad/api/tasks.go | 1109 --- .../github.com/hashicorp/nomad/api/utils.go | 32 - .../hashicorp/nomad/api/variables.go | 472 -- .../taskrunner/interfaces/events.go | 7 - .../taskrunner/interfaces/handle.go | 11 - .../taskrunner/interfaces/lifecycle.go | 27 - .../allocrunner/taskrunner/state/state.go | 111 - .../hashicorp/nomad/client/config/artifact.go | 86 - .../hashicorp/nomad/client/config/config.go | 906 --- .../hashicorp/nomad/client/config/testing.go | 72 - .../nomad/client/devicemanager/state/state.go | 11 - .../nomad/client/dynamicplugins/registry.go | 60 + .../nomad/client/lib/cgutil/cgutil_linux.go | 24 + .../client/lib/cgutil/cpuset_manager_v1.go | 7 +- .../client/lib/cgutil/cpuset_manager_v2.go | 3 +- .../pluginmanager/csimanager/instance.go | 8 +- .../pluginmanager/csimanager/interface.go | 16 + .../pluginmanager/csimanager/manager.go | 17 +- 
.../client/pluginmanager/csimanager/volume.go | 18 +- .../drivermanager/state/state.go | 11 - .../client/serviceregistration/address.go | 174 - .../serviceregistration/checks/client.go | 219 - .../serviceregistration/checks/result.go | 108 - .../nomad/client/serviceregistration/id.go | 27 - .../service_registration.go | 156 - .../client/serviceregistration/watcher.go | 326 - .../client/serviceregistration/workload.go | 83 - .../hashicorp/nomad/client/state/08types.go | 131 - .../hashicorp/nomad/client/state/12types.go | 9 - .../hashicorp/nomad/client/state/db_bolt.go | 869 --- .../hashicorp/nomad/client/state/db_error.go | 114 - .../hashicorp/nomad/client/state/db_mem.go | 280 - .../hashicorp/nomad/client/state/db_noop.go | 109 - .../hashicorp/nomad/client/state/interface.go | 136 - .../hashicorp/nomad/client/state/upgrade.go | 349 - .../nomad/client/structs/allochook.go | 21 +- .../hashicorp/nomad/client/taskenv/env.go | 9 +- .../drivers/shared/executor/executor_linux.go | 16 +- .../hashicorp/nomad/helper/boltdd/boltdd.go | 457 -- .../helper/bufconndialer/bufconndialer.go | 48 - .../hashicorp/nomad/helper/funcs.go | 11 + .../helper/pluginutils/loader/api_versions.go | 16 - .../helper/pluginutils/loader/filter_unix.go | 11 - .../pluginutils/loader/filter_windows.go | 16 - .../nomad/helper/pluginutils/loader/init.go | 502 -- .../helper/pluginutils/loader/instance.go | 63 - .../nomad/helper/pluginutils/loader/loader.go | 287 - .../helper/pluginutils/loader/testing.go | 87 - .../nomad/helper/pluginutils/loader/util.go | 26 - .../hashicorp/nomad/nomad/mock/alloc.go | 30 + .../hashicorp/nomad/nomad/mock/job.go | 52 + .../hashicorp/nomad/nomad/structs/diff.go | 6 +- .../hashicorp/nomad/nomad/structs/funcs.go | 2 +- .../hashicorp/nomad/nomad/structs/network.go | 6 +- .../hashicorp/nomad/nomad/structs/structs.go | 26 + .../nomad/nomad/structs/variables.go | 5 +- .../hashicorp/nomad/nomad/structs/volumes.go | 8 + .../nomad/plugins/drivers/proto/driver.pb.go | 525 +- .../nomad/plugins/drivers/proto/driver.proto | 12 + .../plugins/drivers/testutils/dns_testing.go | 27 +- .../plugins/drivers/testutils/testing.go | 13 +- .../hashicorp/nomad/plugins/drivers/utils.go | 10 +- .../nomad/testutil/container_images.go | 43 + .../hashicorp/nomad/testutil/wait.go | 24 +- .../hashicorp/nomad/version/version.go | 92 - vendor/github.com/hashicorp/vault/api/auth.go | 5 +- .../vault/api/auth/kubernetes/LICENSE | 363 - .../vault/api/auth/kubernetes/kubernetes.go | 133 - .../hashicorp/vault/api/auth_token.go | 3 + .../github.com/hashicorp/vault/api/client.go | 68 +- vendor/github.com/hashicorp/vault/api/help.go | 3 + vendor/github.com/hashicorp/vault/api/kv.go | 3 + .../github.com/hashicorp/vault/api/kv_v1.go | 3 + .../github.com/hashicorp/vault/api/kv_v2.go | 3 + .../hashicorp/vault/api/lifetime_watcher.go | 53 +- .../github.com/hashicorp/vault/api/logical.go | 71 +- .../hashicorp/vault/api/output_policy.go | 35 +- .../hashicorp/vault/api/output_string.go | 3 + .../hashicorp/vault/api/plugin_helpers.go | 12 +- .../helper/consts => api}/plugin_types.go | 9 +- .../github.com/hashicorp/vault/api/request.go | 7 +- .../hashicorp/vault/api/response.go | 17 +- .../github.com/hashicorp/vault/api/secret.go | 75 +- vendor/github.com/hashicorp/vault/api/ssh.go | 3 + .../hashicorp/vault/api/ssh_agent.go | 33 +- vendor/github.com/hashicorp/vault/api/sys.go | 3 + .../hashicorp/vault/api/sys_audit.go | 3 + .../hashicorp/vault/api/sys_auth.go | 3 + .../hashicorp/vault/api/sys_capabilities.go | 3 + 
.../hashicorp/vault/api/sys_config_cors.go | 3 + .../hashicorp/vault/api/sys_generate_root.go | 3 + .../hashicorp/vault/api/sys_hastatus.go | 3 + .../hashicorp/vault/api/sys_health.go | 3 + .../hashicorp/vault/api/sys_init.go | 3 + .../hashicorp/vault/api/sys_leader.go | 3 + .../hashicorp/vault/api/sys_leases.go | 3 + .../github.com/hashicorp/vault/api/sys_mfa.go | 3 + .../hashicorp/vault/api/sys_monitor.go | 7 +- .../hashicorp/vault/api/sys_mounts.go | 67 +- .../hashicorp/vault/api/sys_plugins.go | 28 +- .../hashicorp/vault/api/sys_policy.go | 3 + .../hashicorp/vault/api/sys_raft.go | 3 + .../hashicorp/vault/api/sys_rekey.go | 3 + .../hashicorp/vault/api/sys_rotate.go | 3 + .../hashicorp/vault/api/sys_seal.go | 36 +- .../hashicorp/vault/api/sys_stepdown.go | 3 + vendor/github.com/hashicorp/vault/sdk/LICENSE | 365 - .../vault/sdk/helper/certutil/helpers.go | 1386 ---- .../vault/sdk/helper/certutil/types.go | 1015 --- .../vault/sdk/helper/compressutil/compress.go | 222 - .../vault/sdk/helper/consts/agent.go | 12 - .../vault/sdk/helper/consts/consts.go | 37 - .../sdk/helper/consts/deprecation_status.go | 31 - .../vault/sdk/helper/consts/error.go | 25 - .../vault/sdk/helper/consts/replication.go | 159 - .../vault/sdk/helper/consts/token_consts.go | 10 - .../vault/sdk/helper/cryptoutil/cryptoutil.go | 11 - .../vault/sdk/helper/errutil/error.go | 20 - .../hashicorp/vault/sdk/helper/hclutil/hcl.go | 36 - .../vault/sdk/helper/jsonutil/json.go | 100 - .../vault/sdk/helper/license/feature.go | 10 - .../vault/sdk/helper/locksutil/locks.go | 58 - .../vault/sdk/helper/logging/logging.go | 81 - .../sdk/helper/pathmanager/pathmanager.go | 136 - .../vault/sdk/helper/pluginutil/env.go | 77 - .../sdk/helper/pluginutil/multiplexing.go | 80 - .../sdk/helper/pluginutil/multiplexing.pb.go | 213 - .../sdk/helper/pluginutil/multiplexing.proto | 13 - .../helper/pluginutil/multiplexing_grpc.pb.go | 101 - .../vault/sdk/helper/pluginutil/run_config.go | 179 - .../vault/sdk/helper/pluginutil/runner.go | 115 - .../vault/sdk/helper/pluginutil/tls.go | 106 - .../vault/sdk/helper/strutil/strutil.go | 94 - .../vault/sdk/helper/wrapping/wrapinfo.go | 37 - .../hashicorp/vault/sdk/logical/audit.go | 19 - .../hashicorp/vault/sdk/logical/auth.go | 129 - .../hashicorp/vault/sdk/logical/connection.go | 18 - .../vault/sdk/logical/controlgroup.go | 17 - .../hashicorp/vault/sdk/logical/error.go | 122 - .../vault/sdk/logical/identity.pb.go | 709 -- .../vault/sdk/logical/identity.proto | 91 - .../hashicorp/vault/sdk/logical/lease.go | 53 - .../hashicorp/vault/sdk/logical/logical.go | 156 - .../vault/sdk/logical/logical_storage.go | 52 - .../vault/sdk/logical/managed_key.go | 97 - .../hashicorp/vault/sdk/logical/plugin.pb.go | 171 - .../hashicorp/vault/sdk/logical/plugin.proto | 16 - .../hashicorp/vault/sdk/logical/request.go | 394 -- .../hashicorp/vault/sdk/logical/response.go | 322 - .../vault/sdk/logical/response_util.go | 204 - .../hashicorp/vault/sdk/logical/secret.go | 30 - .../hashicorp/vault/sdk/logical/storage.go | 158 - .../vault/sdk/logical/storage_inmem.go | 87 - .../vault/sdk/logical/storage_view.go | 110 - .../vault/sdk/logical/system_view.go | 235 - .../hashicorp/vault/sdk/logical/testing.go | 87 - .../hashicorp/vault/sdk/logical/token.go | 304 - .../vault/sdk/logical/translate_response.go | 161 - .../hashicorp/vault/sdk/logical/version.pb.go | 204 - .../hashicorp/vault/sdk/logical/version.proto | 17 - .../vault/sdk/logical/version_grpc.pb.go | 103 - .../hashicorp/vault/sdk/physical/cache.go | 260 - 
.../hashicorp/vault/sdk/physical/encoding.go | 108 - .../hashicorp/vault/sdk/physical/entry.go | 20 - .../hashicorp/vault/sdk/physical/error.go | 110 - .../vault/sdk/physical/inmem/inmem.go | 310 - .../vault/sdk/physical/inmem/inmem_ha.go | 167 - .../hashicorp/vault/sdk/physical/latency.go | 113 - .../hashicorp/vault/sdk/physical/physical.go | 134 - .../vault/sdk/physical/physical_access.go | 40 - .../vault/sdk/physical/physical_view.go | 94 - .../hashicorp/vault/sdk/physical/testing.go | 497 -- .../vault/sdk/physical/transactions.go | 150 - .../hashicorp/vault/sdk/version/cgo.go | 7 - .../hashicorp/vault/sdk/version/version.go | 80 - .../vault/sdk/version/version_base.go | 17 - vendor/github.com/hashicorp/yamux/session.go | 108 +- vendor/github.com/hashicorp/yamux/stream.go | 27 +- .../github.com/ishidawataru/sctp/.gitignore | 16 - .../github.com/ishidawataru/sctp/.travis.yml | 18 - .../github.com/ishidawataru/sctp/GO_LICENSE | 27 - vendor/github.com/ishidawataru/sctp/NOTICE | 3 - vendor/github.com/ishidawataru/sctp/README.md | 18 - .../ishidawataru/sctp/ipsock_linux.go | 222 - vendor/github.com/ishidawataru/sctp/sctp.go | 729 -- .../ishidawataru/sctp/sctp_linux.go | 305 - .../ishidawataru/sctp/sctp_unsupported.go | 98 - .../klauspost/compress/.gitattributes | 2 + .../v3 => klauspost/compress}/.gitignore | 11 +- .../klauspost/compress/.goreleaser.yml | 141 + .../libnetwork => klauspost/compress}/LICENSE | 108 +- .../github.com/klauspost/compress/README.md | 578 ++ .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 168 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 ++ .../klauspost/compress/fse/decompress.go | 374 + .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 233 + .../klauspost/compress/huff0/bitwriter.go | 95 + .../klauspost/compress/huff0/bytereader.go | 44 + .../klauspost/compress/huff0/compress.go | 730 ++ .../klauspost/compress/huff0/decompress.go | 1167 ++++ .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 846 +++ .../compress/huff0/decompress_generic.go | 299 + .../klauspost/compress/huff0/huff0.go | 337 + .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref}/LICENSE | 0 .../compress/internal/snapref}/decode.go | 2 +- .../internal/snapref}/decode_other.go | 4 +- .../compress/internal/snapref}/encode.go | 2 +- .../internal/snapref}/encode_other.go | 10 +- .../compress/internal/snapref}/snappy.go | 4 +- vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 140 + .../klauspost/compress/zstd/bitwriter.go | 113 + .../klauspost/compress/zstd/blockdec.go | 720 ++ .../klauspost/compress/zstd/blockenc.go | 871 +++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 230 + .../klauspost/compress/zstd/decoder.go | 950 +++ .../compress/zstd/decoder_options.go | 149 + .../klauspost/compress/zstd/dict.go | 
122 + .../klauspost/compress/zstd/enc_base.go | 188 + .../klauspost/compress/zstd/enc_best.go | 559 ++ .../klauspost/compress/zstd/enc_better.go | 1246 ++++ .../klauspost/compress/zstd/enc_dfast.go | 1127 +++ .../klauspost/compress/zstd/enc_fast.go | 900 +++ .../klauspost/compress/zstd/encoder.go | 641 ++ .../compress/zstd/encoder_options.go | 317 + .../klauspost/compress/zstd/framedec.go | 436 ++ .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 + .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 72 + .../klauspost/compress/zstd/fse_encoder.go | 701 ++ .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 58 + .../compress/zstd/internal/xxhash/xxhash.go | 237 + .../zstd/internal/xxhash/xxhash_amd64.s | 216 + .../zstd/internal/xxhash/xxhash_arm64.s | 186 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 77 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/seqdec.go | 509 ++ .../klauspost/compress/zstd/seqdec_amd64.go | 379 + .../klauspost/compress/zstd/seqdec_amd64.s | 4099 +++++++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 435 ++ .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 152 + .../github.com/mattn/go-isatty/isatty_bsd.go | 4 +- .../sctp => moby/patternmatcher}/LICENSE | 18 +- vendor/github.com/moby/patternmatcher/NOTICE | 16 + .../moby/patternmatcher/patternmatcher.go | 474 ++ .../moby/sys/sequential}/LICENSE | 2 +- vendor/github.com/moby/sys/sequential/doc.go | 15 + .../moby/sys/sequential/sequential_unix.go | 45 + .../moby/sys/sequential/sequential_windows.go | 161 + vendor/github.com/pierrec/lz4/.gitignore | 34 - vendor/github.com/pierrec/lz4/.travis.yml | 24 - vendor/github.com/pierrec/lz4/LICENSE | 28 - vendor/github.com/pierrec/lz4/README.md | 90 - vendor/github.com/pierrec/lz4/block.go | 413 -- vendor/github.com/pierrec/lz4/debug.go | 23 - vendor/github.com/pierrec/lz4/debug_stub.go | 7 - vendor/github.com/pierrec/lz4/decode_amd64.go | 8 - vendor/github.com/pierrec/lz4/decode_amd64.s | 375 - vendor/github.com/pierrec/lz4/decode_other.go | 98 - vendor/github.com/pierrec/lz4/errors.go | 30 - .../pierrec/lz4/internal/xxh32/xxh32zero.go | 223 - vendor/github.com/pierrec/lz4/lz4.go | 116 - vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 - .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 - vendor/github.com/pierrec/lz4/reader.go | 335 - .../github.com/pierrec/lz4/reader_legacy.go | 207 - vendor/github.com/pierrec/lz4/writer.go | 422 -- .../github.com/pierrec/lz4/writer_legacy.go | 182 - .../shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go | 22 +- .../shirou/gopsutil/v3/cpu/cpu_darwin.go | 15 +- .../shirou/gopsutil/v3/cpu/cpu_linux.go | 12 +- .../shirou/gopsutil/v3/disk/disk.go | 2 + .../shirou/gopsutil/v3/disk/disk_aix_nocgo.go | 14 +- .../shirou/gopsutil/v3/disk/disk_freebsd.go | 3 +- .../shirou/gopsutil/v3/disk/disk_linux.go | 5 +- .../shirou/gopsutil/v3/disk/disk_windows.go | 17 +- .../shirou/gopsutil/v3/host/host.go | 2 + .../shirou/gopsutil/v3/host/host_linux.go | 13 +- .../gopsutil/v3/host/host_linux_loong64.go | 48 + .../gopsutil/v3/internal/common/binary.go 
| 1 + .../gopsutil/v3/internal/common/common.go | 1 + .../v3/internal/common/common_windows.go | 7 +- .../shirou/gopsutil/v3/mem/mem_aix_nocgo.go | 2 +- .../shirou/gopsutil/v3/mem/mem_freebsd.go | 3 +- .../shirou/gopsutil/v3/mem/mem_linux.go | 3 +- .../shirou/gopsutil/v3/mem/mem_openbsd.go | 1 + .../shirou/gopsutil/v3/net/net_aix_cgo.go | 12 +- .../shirou/gopsutil/v3/net/net_windows.go | 3 +- .../gopsutil/v3/process/process_freebsd.go | 8 +- .../gopsutil/v3/process/process_linux.go | 5 +- .../gopsutil/v3/process/process_posix.go | 5 +- .../v3/process/process_windows_32bit.go | 3 +- .../shoenig/go-m1cpu/.golangci.yaml | 12 + .../mlock => shoenig/go-m1cpu}/LICENSE | 0 vendor/github.com/shoenig/go-m1cpu/Makefile | 12 + vendor/github.com/shoenig/go-m1cpu/README.md | 66 + vendor/github.com/shoenig/go-m1cpu/cpu.go | 213 + .../shoenig/go-m1cpu/incompatible.go | 53 + .../shoenig/test/interfaces/interfaces.go | 24 + .../test/internal/assertions/assertions.go | 82 +- .../shoenig/test/internal/util/slices.go | 10 + vendor/github.com/shoenig/test/must/must.go | 79 +- vendor/go.etcd.io/bbolt/.gitignore | 10 - vendor/go.etcd.io/bbolt/LICENSE | 20 - vendor/go.etcd.io/bbolt/Makefile | 63 - vendor/go.etcd.io/bbolt/README.md | 967 --- vendor/go.etcd.io/bbolt/bolt_386.go | 7 - vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm.go | 7 - vendor/go.etcd.io/bbolt/bolt_arm64.go | 10 - vendor/go.etcd.io/bbolt/bolt_linux.go | 10 - vendor/go.etcd.io/bbolt/bolt_loong64.go | 10 - vendor/go.etcd.io/bbolt/bolt_mips64x.go | 10 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 10 - vendor/go.etcd.io/bbolt/bolt_openbsd.go | 27 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 10 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 10 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 10 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 10 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 10 - vendor/go.etcd.io/bbolt/bolt_unix.go | 87 - vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 91 - vendor/go.etcd.io/bbolt/bolt_unix_solaris.go | 88 - vendor/go.etcd.io/bbolt/bolt_windows.go | 117 - vendor/go.etcd.io/bbolt/boltsync_unix.go | 9 - vendor/go.etcd.io/bbolt/bucket.go | 791 --- vendor/go.etcd.io/bbolt/compact.go | 119 - vendor/go.etcd.io/bbolt/cursor.go | 420 -- vendor/go.etcd.io/bbolt/db.go | 1361 ---- vendor/go.etcd.io/bbolt/doc.go | 40 - vendor/go.etcd.io/bbolt/errors.go | 78 - vendor/go.etcd.io/bbolt/freelist.go | 405 -- vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 - vendor/go.etcd.io/bbolt/mlock_unix.go | 37 - vendor/go.etcd.io/bbolt/mlock_windows.go | 11 - vendor/go.etcd.io/bbolt/node.go | 610 -- vendor/go.etcd.io/bbolt/page.go | 214 - vendor/go.etcd.io/bbolt/tx.go | 797 --- vendor/go.etcd.io/bbolt/tx_check.go | 226 - vendor/go.etcd.io/bbolt/unsafe.go | 39 - vendor/go.opencensus.io/.gitignore | 9 - vendor/go.opencensus.io/AUTHORS | 1 - vendor/go.opencensus.io/CONTRIBUTING.md | 63 - vendor/go.opencensus.io/Makefile | 97 - vendor/go.opencensus.io/README.md | 267 - vendor/go.opencensus.io/appveyor.yml | 24 - vendor/go.opencensus.io/internal/internal.go | 37 - vendor/go.opencensus.io/internal/sanitize.go | 50 - .../internal/traceinternals.go | 53 - vendor/go.opencensus.io/opencensus.go | 21 - vendor/go.opencensus.io/trace/basetypes.go | 129 - vendor/go.opencensus.io/trace/config.go | 86 - vendor/go.opencensus.io/trace/doc.go | 52 - vendor/go.opencensus.io/trace/evictedqueue.go | 38 - vendor/go.opencensus.io/trace/export.go | 97 - .../trace/internal/internal.go | 22 - vendor/go.opencensus.io/trace/lrumap.go | 61 - 
vendor/go.opencensus.io/trace/sampling.go | 75 - vendor/go.opencensus.io/trace/spanbucket.go | 130 - vendor/go.opencensus.io/trace/spanstore.go | 308 - vendor/go.opencensus.io/trace/status_codes.go | 37 - vendor/go.opencensus.io/trace/trace.go | 595 -- vendor/go.opencensus.io/trace/trace_api.go | 265 - vendor/go.opencensus.io/trace/trace_go11.go | 33 - .../go.opencensus.io/trace/trace_nongo11.go | 26 - .../trace/tracestate/tracestate.go | 147 - vendor/go.uber.org/atomic/.codecov.yml | 19 - vendor/go.uber.org/atomic/.gitignore | 15 - vendor/go.uber.org/atomic/CHANGELOG.md | 100 - vendor/go.uber.org/atomic/LICENSE.txt | 19 - vendor/go.uber.org/atomic/Makefile | 79 - vendor/go.uber.org/atomic/README.md | 63 - vendor/go.uber.org/atomic/bool.go | 81 - vendor/go.uber.org/atomic/bool_ext.go | 53 - vendor/go.uber.org/atomic/doc.go | 23 - vendor/go.uber.org/atomic/duration.go | 82 - vendor/go.uber.org/atomic/duration_ext.go | 40 - vendor/go.uber.org/atomic/error.go | 51 - vendor/go.uber.org/atomic/error_ext.go | 39 - vendor/go.uber.org/atomic/float64.go | 77 - vendor/go.uber.org/atomic/float64_ext.go | 69 - vendor/go.uber.org/atomic/gen.go | 27 - vendor/go.uber.org/atomic/int32.go | 102 - vendor/go.uber.org/atomic/int64.go | 102 - vendor/go.uber.org/atomic/nocmp.go | 35 - vendor/go.uber.org/atomic/string.go | 54 - vendor/go.uber.org/atomic/string_ext.go | 45 - vendor/go.uber.org/atomic/time.go | 55 - vendor/go.uber.org/atomic/time_ext.go | 36 - vendor/go.uber.org/atomic/uint32.go | 102 - vendor/go.uber.org/atomic/uint64.go | 102 - vendor/go.uber.org/atomic/uintptr.go | 102 - vendor/go.uber.org/atomic/unsafe_pointer.go | 58 - vendor/go.uber.org/atomic/value.go | 31 - vendor/golang.org/x/crypto/cryptobyte/asn1.go | 825 --- .../x/crypto/cryptobyte/asn1/asn1.go | 46 - .../golang.org/x/crypto/cryptobyte/builder.go | 350 - .../golang.org/x/crypto/cryptobyte/string.go | 183 - vendor/golang.org/x/exp/slices/slices.go | 45 +- vendor/golang.org/x/exp/slices/sort.go | 2 +- .../grpc/test/bufconn/bufconn.go | 318 - vendor/modules.txt | 181 +- vendor/oss.indeed.com/go/libtime/.gitignore | 4 - vendor/oss.indeed.com/go/libtime/.travis.yml | 13 - .../go/libtime/CODE_OF_CONDUCT.md | 76 - vendor/oss.indeed.com/go/libtime/LICENSE | 11 - vendor/oss.indeed.com/go/libtime/OSSMETADATA | 1 - vendor/oss.indeed.com/go/libtime/README.md | 69 - vendor/oss.indeed.com/go/libtime/after.go | 28 - vendor/oss.indeed.com/go/libtime/clock.go | 48 - vendor/oss.indeed.com/go/libtime/repo.cfg | 1 - vendor/oss.indeed.com/go/libtime/sleep.go | 28 - .../oss.indeed.com/go/libtime/timeformat.go | 11 - vendor/oss.indeed.com/go/libtime/timer.go | 36 - vendor/oss.indeed.com/go/libtime/tools.go | 7 - vendor/oss.indeed.com/go/libtime/unixtime.go | 28 - 915 files changed, 28863 insertions(+), 96812 deletions(-) delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/vhd/vhd.go delete mode 100644 vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/.gitattributes delete mode 100644 vendor/github.com/Microsoft/hcsshim/.gitignore delete mode 100644 vendor/github.com/Microsoft/hcsshim/.golangci.yml delete mode 100644 vendor/github.com/Microsoft/hcsshim/CODEOWNERS delete mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE delete 
mode 100644 vendor/github.com/Microsoft/hcsshim/Makefile delete mode 100644 vendor/github.com/Microsoft/hcsshim/Protobuild.toml delete mode 100644 vendor/github.com/Microsoft/hcsshim/README.md delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/attach.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/detach.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/export.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/format.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/import.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/mount.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/setup.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/storage.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/container.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/errors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 delete mode 100644 vendor/github.com/Microsoft/hcsshim/hcsshim.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnsendpoint.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnsglobals.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnsnetwork.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnspolicylist.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/hnssupport.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/interface.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go delete mode 
100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go delete mode 100644 
vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/log/g.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/oc/span.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go delete mode 
100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/layer.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/process.go delete mode 100644 vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/doc.go delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt delete mode 100644 vendor/github.com/containerd/cgroups/stats/v1/metrics.proto delete mode 100644 vendor/github.com/containerd/containerd/sys/epoll.go delete mode 100644 vendor/github.com/containerd/containerd/sys/fds.go delete mode 100644 vendor/github.com/containerd/containerd/sys/filesys_unix.go delete mode 100644 vendor/github.com/containerd/containerd/sys/filesys_windows.go delete mode 100644 vendor/github.com/containerd/containerd/sys/oom_linux.go delete mode 100644 vendor/github.com/containerd/containerd/sys/oom_unsupported.go delete mode 100644 vendor/github.com/containerd/containerd/sys/socket_unix.go delete mode 100644 vendor/github.com/containerd/containerd/sys/socket_windows.go delete mode 100644 
vendor/github.com/containerd/containerd/sys/userns_deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go create mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go create mode 100644 vendor/github.com/docker/docker/api/types/container/deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go rename vendor/github.com/docker/{ => docker}/libnetwork/resolvconf/resolvconf.go (76%) create mode 100644 vendor/github.com/docker/docker/libnetwork/resolvconf/utils.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go create mode 100644 vendor/github.com/docker/docker/pkg/system/image_os.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/rm_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go delete mode 100644 vendor/github.com/docker/libnetwork/resolvconf/README.md delete mode 100644 vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go delete mode 100644 vendor/github.com/docker/libnetwork/types/types.go create mode 100644 vendor/github.com/fatih/color/color_windows.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go delete mode 100644 vendor/github.com/gojuno/minimock/v3/.goreleaser.yml delete mode 100644 vendor/github.com/gojuno/minimock/v3/.travis.yml delete mode 100644 vendor/github.com/gojuno/minimock/v3/LICENSE delete mode 100644 vendor/github.com/gojuno/minimock/v3/Makefile delete mode 100644 vendor/github.com/gojuno/minimock/v3/README.md delete mode 100644 vendor/github.com/gojuno/minimock/v3/camel_to_snake.go delete mode 100644 
vendor/github.com/gojuno/minimock/v3/doc.go delete mode 100644 vendor/github.com/gojuno/minimock/v3/equal.go delete mode 100644 vendor/github.com/gojuno/minimock/v3/logo.svg delete mode 100644 vendor/github.com/gojuno/minimock/v3/mock_controller.go delete mode 100644 vendor/github.com/gojuno/minimock/v3/safe_tester.go delete mode 100644 vendor/github.com/gojuno/minimock/v3/template.go delete mode 100644 vendor/github.com/golang/groupcache/LICENSE delete mode 100644 vendor/github.com/golang/groupcache/lru/lru.go delete mode 100644 vendor/github.com/golang/snappy/.gitignore delete mode 100644 vendor/github.com/golang/snappy/AUTHORS delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/snappy/README delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/decode_arm64.s delete mode 100644 vendor/github.com/golang/snappy/decode_asm.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/encode_arm64.s delete mode 100644 vendor/github.com/golang/snappy/encode_asm.go delete mode 100644 vendor/github.com/gorilla/websocket/.gitignore delete mode 100644 vendor/github.com/gorilla/websocket/AUTHORS delete mode 100644 vendor/github.com/gorilla/websocket/LICENSE delete mode 100644 vendor/github.com/gorilla/websocket/README.md delete mode 100644 vendor/github.com/gorilla/websocket/client.go delete mode 100644 vendor/github.com/gorilla/websocket/compression.go delete mode 100644 vendor/github.com/gorilla/websocket/conn.go delete mode 100644 vendor/github.com/gorilla/websocket/doc.go delete mode 100644 vendor/github.com/gorilla/websocket/join.go delete mode 100644 vendor/github.com/gorilla/websocket/json.go delete mode 100644 vendor/github.com/gorilla/websocket/mask.go delete mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go delete mode 100644 vendor/github.com/gorilla/websocket/prepared.go delete mode 100644 vendor/github.com/gorilla/websocket/proxy.go delete mode 100644 vendor/github.com/gorilla/websocket/server.go delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go delete mode 100644 vendor/github.com/gorilla/websocket/util.go delete mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/auth.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/config.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/consul.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/convert.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/dedup.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/default_delimiters.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/env.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/exec.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/logfile.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/mapstructure.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/nomad.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/retry.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/ssl.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/syslog.go delete mode 100644 
vendor/github.com/hashicorp/consul-template/config/template.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/transport.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/vault.go delete mode 100644 vendor/github.com/hashicorp/consul-template/config/wait.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/client_set.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/connect_ca.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/connect_leaf.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/dependency.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/errors.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/file.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/health_service.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/kv_get.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/kv_list.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/nomad_service.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/nomad_services.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/nomad_var_get.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/nomad_var_list.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/nomad_var_structs.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/set.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_common.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_list.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_pki.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_read.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_token.go delete mode 100644 vendor/github.com/hashicorp/consul-template/dependency/vault_write.go delete mode 100644 vendor/github.com/hashicorp/consul-template/version/version.go rename vendor/github.com/{containerd/cgroups => hashicorp/cronexpr}/LICENSE (99%) delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go delete mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go delete mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-uuid/README.md delete mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/LICENSE delete mode 
100644 vendor/github.com/hashicorp/nomad/api/acl.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/agent.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/allocations.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/allocations_exec.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/api.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/constraint.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/consul.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/contexts/contexts.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/csi.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/deployments.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/evaluations.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/event_stream.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/fs.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/ioutil.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/jobs.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/keyring.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/namespace.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/nodes.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/operator.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/operator_autopilot.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/operator_metrics.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/quota.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/raw.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/recommendations.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/regions.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/resources.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/scaling.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/search.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/sentinel.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/services.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/status.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/system.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/tasks.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/utils.go delete mode 100644 vendor/github.com/hashicorp/nomad/api/variables.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/events.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/handle.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/lifecycle.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/state/state.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/config/artifact.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/config/config.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/config/testing.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/devicemanager/state/state.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state/state.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/serviceregistration/address.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/result.go delete mode 100644 
vendor/github.com/hashicorp/nomad/client/serviceregistration/id.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/serviceregistration/watcher.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/08types.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/12types.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/db_bolt.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/db_error.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/db_mem.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/db_noop.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/interface.go delete mode 100644 vendor/github.com/hashicorp/nomad/client/state/upgrade.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/boltdd/boltdd.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/bufconndialer/bufconndialer.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/api_versions.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_unix.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_windows.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/init.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/instance.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/loader.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/testing.go delete mode 100644 vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/util.go create mode 100644 vendor/github.com/hashicorp/nomad/testutil/container_images.go delete mode 100644 vendor/github.com/hashicorp/nomad/version/version.go delete mode 100644 vendor/github.com/hashicorp/vault/api/auth/kubernetes/LICENSE delete mode 100644 vendor/github.com/hashicorp/vault/api/auth/kubernetes/kubernetes.go rename vendor/github.com/hashicorp/vault/{sdk/helper/consts => api}/plugin_types.go (84%) delete mode 100644 vendor/github.com/hashicorp/vault/sdk/LICENSE delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go delete mode 100644 
vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/audit.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/auth.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/connection.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/error.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/lease.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/request.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response_util.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/secret.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/system_view.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/testing.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/token.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version.proto delete mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/cache.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/encoding.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/entry.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/error.go delete 
mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/latency.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/testing.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/transactions.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/version/cgo.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version.go delete mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version_base.go delete mode 100644 vendor/github.com/ishidawataru/sctp/.gitignore delete mode 100644 vendor/github.com/ishidawataru/sctp/.travis.yml delete mode 100644 vendor/github.com/ishidawataru/sctp/GO_LICENSE delete mode 100644 vendor/github.com/ishidawataru/sctp/NOTICE delete mode 100644 vendor/github.com/ishidawataru/sctp/README.md delete mode 100644 vendor/github.com/ishidawataru/sctp/ipsock_linux.go delete mode 100644 vendor/github.com/ishidawataru/sctp/sctp.go delete mode 100644 vendor/github.com/ishidawataru/sctp/sctp_linux.go delete mode 100644 vendor/github.com/ishidawataru/sctp/sctp_unsupported.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes rename vendor/github.com/{gojuno/minimock/v3 => klauspost/compress}/.gitignore (74%) create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml rename vendor/github.com/{docker/libnetwork => klauspost/compress}/LICENSE (67%) create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 
vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/LICENSE (100%) rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/decode.go (99%) rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/decode_other.go (98%) rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/encode.go (99%) rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/encode_other.go (98%) rename vendor/github.com/{golang/snappy => klauspost/compress/internal/snapref}/snappy.go (96%) create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go rename vendor/github.com/{ishidawataru/sctp => moby/patternmatcher}/LICENSE (93%) create mode 100644 vendor/github.com/moby/patternmatcher/NOTICE create mode 100644 vendor/github.com/moby/patternmatcher/patternmatcher.go rename vendor/{go.opencensus.io => github.com/moby/sys/sequential}/LICENSE (99%) create mode 100644 vendor/github.com/moby/sys/sequential/doc.go create mode 100644 vendor/github.com/moby/sys/sequential/sequential_unix.go create mode 100644 vendor/github.com/moby/sys/sequential/sequential_windows.go delete mode 100644 vendor/github.com/pierrec/lz4/.gitignore delete mode 100644 vendor/github.com/pierrec/lz4/.travis.yml delete mode 100644 vendor/github.com/pierrec/lz4/LICENSE delete mode 100644 vendor/github.com/pierrec/lz4/README.md delete mode 100644 vendor/github.com/pierrec/lz4/block.go delete mode 100644 vendor/github.com/pierrec/lz4/debug.go delete mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go delete mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s delete mode 100644 vendor/github.com/pierrec/lz4/decode_other.go delete mode 100644 vendor/github.com/pierrec/lz4/errors.go delete mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/reader.go delete mode 100644 vendor/github.com/pierrec/lz4/reader_legacy.go delete mode 100644 vendor/github.com/pierrec/lz4/writer.go delete mode 100644 vendor/github.com/pierrec/lz4/writer_legacy.go create mode 100644 vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go create mode 100644 vendor/github.com/shoenig/go-m1cpu/.golangci.yaml rename vendor/github.com/{hashicorp/go-secure-stdlib/mlock => shoenig/go-m1cpu}/LICENSE (100%) create mode 100644 vendor/github.com/shoenig/go-m1cpu/Makefile create mode 100644 vendor/github.com/shoenig/go-m1cpu/README.md create mode 100644 vendor/github.com/shoenig/go-m1cpu/cpu.go create mode 100644 vendor/github.com/shoenig/go-m1cpu/incompatible.go create mode 100644 vendor/github.com/shoenig/test/internal/util/slices.go delete mode 100644 vendor/go.etcd.io/bbolt/.gitignore delete mode 100644 vendor/go.etcd.io/bbolt/LICENSE delete mode 100644 vendor/go.etcd.io/bbolt/Makefile delete mode 100644 vendor/go.etcd.io/bbolt/README.md delete mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_linux.go delete mode 100644 
vendor/go.etcd.io/bbolt/bolt_loong64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_openbsd.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64le.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_solaris.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_windows.go delete mode 100644 vendor/go.etcd.io/bbolt/boltsync_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/bucket.go delete mode 100644 vendor/go.etcd.io/bbolt/compact.go delete mode 100644 vendor/go.etcd.io/bbolt/cursor.go delete mode 100644 vendor/go.etcd.io/bbolt/db.go delete mode 100644 vendor/go.etcd.io/bbolt/doc.go delete mode 100644 vendor/go.etcd.io/bbolt/errors.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go delete mode 100644 vendor/go.etcd.io/bbolt/mlock_unix.go delete mode 100644 vendor/go.etcd.io/bbolt/mlock_windows.go delete mode 100644 vendor/go.etcd.io/bbolt/node.go delete mode 100644 vendor/go.etcd.io/bbolt/page.go delete mode 100644 vendor/go.etcd.io/bbolt/tx.go delete mode 100644 vendor/go.etcd.io/bbolt/tx_check.go delete mode 100644 vendor/go.etcd.io/bbolt/unsafe.go delete mode 100644 vendor/go.opencensus.io/.gitignore delete mode 100644 vendor/go.opencensus.io/AUTHORS delete mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md delete mode 100644 vendor/go.opencensus.io/Makefile delete mode 100644 vendor/go.opencensus.io/README.md delete mode 100644 vendor/go.opencensus.io/appveyor.yml delete mode 100644 vendor/go.opencensus.io/internal/internal.go delete mode 100644 vendor/go.opencensus.io/internal/sanitize.go delete mode 100644 vendor/go.opencensus.io/internal/traceinternals.go delete mode 100644 vendor/go.opencensus.io/opencensus.go delete mode 100644 vendor/go.opencensus.io/trace/basetypes.go delete mode 100644 vendor/go.opencensus.io/trace/config.go delete mode 100644 vendor/go.opencensus.io/trace/doc.go delete mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go delete mode 100644 vendor/go.opencensus.io/trace/export.go delete mode 100644 vendor/go.opencensus.io/trace/internal/internal.go delete mode 100644 vendor/go.opencensus.io/trace/lrumap.go delete mode 100644 vendor/go.opencensus.io/trace/sampling.go delete mode 100644 vendor/go.opencensus.io/trace/spanbucket.go delete mode 100644 vendor/go.opencensus.io/trace/spanstore.go delete mode 100644 vendor/go.opencensus.io/trace/status_codes.go delete mode 100644 vendor/go.opencensus.io/trace/trace.go delete mode 100644 vendor/go.opencensus.io/trace/trace_api.go delete mode 100644 vendor/go.opencensus.io/trace/trace_go11.go delete mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go delete mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go delete mode 100644 vendor/go.uber.org/atomic/.codecov.yml delete mode 100644 vendor/go.uber.org/atomic/.gitignore delete mode 100644 vendor/go.uber.org/atomic/CHANGELOG.md delete mode 100644 vendor/go.uber.org/atomic/LICENSE.txt delete mode 100644 vendor/go.uber.org/atomic/Makefile delete mode 100644 vendor/go.uber.org/atomic/README.md delete mode 100644 
vendor/go.uber.org/atomic/bool.go delete mode 100644 vendor/go.uber.org/atomic/bool_ext.go delete mode 100644 vendor/go.uber.org/atomic/doc.go delete mode 100644 vendor/go.uber.org/atomic/duration.go delete mode 100644 vendor/go.uber.org/atomic/duration_ext.go delete mode 100644 vendor/go.uber.org/atomic/error.go delete mode 100644 vendor/go.uber.org/atomic/error_ext.go delete mode 100644 vendor/go.uber.org/atomic/float64.go delete mode 100644 vendor/go.uber.org/atomic/float64_ext.go delete mode 100644 vendor/go.uber.org/atomic/gen.go delete mode 100644 vendor/go.uber.org/atomic/int32.go delete mode 100644 vendor/go.uber.org/atomic/int64.go delete mode 100644 vendor/go.uber.org/atomic/nocmp.go delete mode 100644 vendor/go.uber.org/atomic/string.go delete mode 100644 vendor/go.uber.org/atomic/string_ext.go delete mode 100644 vendor/go.uber.org/atomic/time.go delete mode 100644 vendor/go.uber.org/atomic/time_ext.go delete mode 100644 vendor/go.uber.org/atomic/uint32.go delete mode 100644 vendor/go.uber.org/atomic/uint64.go delete mode 100644 vendor/go.uber.org/atomic/uintptr.go delete mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go delete mode 100644 vendor/go.uber.org/atomic/value.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go delete mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go delete mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go delete mode 100644 vendor/oss.indeed.com/go/libtime/.gitignore delete mode 100644 vendor/oss.indeed.com/go/libtime/.travis.yml delete mode 100644 vendor/oss.indeed.com/go/libtime/CODE_OF_CONDUCT.md delete mode 100644 vendor/oss.indeed.com/go/libtime/LICENSE delete mode 100644 vendor/oss.indeed.com/go/libtime/OSSMETADATA delete mode 100644 vendor/oss.indeed.com/go/libtime/README.md delete mode 100644 vendor/oss.indeed.com/go/libtime/after.go delete mode 100644 vendor/oss.indeed.com/go/libtime/clock.go delete mode 100644 vendor/oss.indeed.com/go/libtime/repo.cfg delete mode 100644 vendor/oss.indeed.com/go/libtime/sleep.go delete mode 100644 vendor/oss.indeed.com/go/libtime/timeformat.go delete mode 100644 vendor/oss.indeed.com/go/libtime/timer.go delete mode 100644 vendor/oss.indeed.com/go/libtime/tools.go delete mode 100644 vendor/oss.indeed.com/go/libtime/unixtime.go diff --git a/go.mod b/go.mod index b476cdb4..ba817ddf 100644 --- a/go.mod +++ b/go.mod @@ -8,23 +8,19 @@ require ( github.com/godbus/dbus v5.0.1+incompatible github.com/google/go-containerregistry v0.5.1 github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62 - github.com/hashicorp/go-hclog v1.3.1 - github.com/hashicorp/go-plugin v1.4.9 - github.com/hashicorp/nomad v1.4.6 + github.com/hashicorp/go-hclog v1.4.0 + github.com/hashicorp/go-plugin v1.4.10 + github.com/hashicorp/nomad v1.4.11 github.com/stretchr/testify v1.8.4 ) require ( - cloud.google.com/go v0.110.4 // indirect - cloud.google.com/go/iam v1.1.1 // indirect - cloud.google.com/go/storage v1.30.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.1.1 // indirect github.com/Masterminds/sprig/v3 v3.2.1 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Microsoft/hcsshim v0.9.10 // indirect 
github.com/agext/levenshtein v1.2.1 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect
@@ -36,7 +32,6 @@ require (
github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
github.com/container-storage-interface/spec v1.4.0 // indirect
- github.com/containerd/cgroups v1.0.4 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/containerd v1.6.26 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.4.1 // indirect
@@ -47,30 +42,24 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/cli v23.0.1+incompatible // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
- github.com/docker/docker v20.10.24+incompatible // indirect
+ github.com/docker/docker v23.0.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
- github.com/fatih/color v1.13.0 // indirect
+ github.com/fatih/color v1.15.0 // indirect
github.com/fsouza/go-dockerclient v1.8.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/gojuno/minimock/v3 v3.0.6 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
- github.com/googleapis/gax-go/v2 v2.11.0 // indirect
- github.com/gorilla/websocket v1.5.0 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/hashicorp/consul/api v1.15.3 // indirect
- github.com/hashicorp/cronexpr v1.1.1 // indirect
+ github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -79,37 +68,32 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.4 // indirect
- github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 // indirect
github.com/hashicorp/go-set v0.1.8 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
- github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.1-vault-3 // indirect
github.com/hashicorp/hcl/v2 v2.9.2-0.20220525143345-ab3cae0737bc // indirect
github.com/hashicorp/memberlist v0.5.0 // indirect
- github.com/hashicorp/nomad/api v0.0.0-20221006174558-2aa7e66bdb52 // indirect
github.com/hashicorp/raft v1.3.11 // indirect
github.com/hashicorp/raft-autopilot v0.1.6 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
- github.com/hashicorp/vault/api v1.8.2 // indirect
- github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 // indirect
- github.com/hashicorp/vault/sdk v0.6.1 // indirect
- github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
+ github.com/hashicorp/vault/api v1.9.1 // indirect
+ github.com/hashicorp/yamux v0.1.1 // indirect
github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.13 // indirect
- github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f // indirect
+ github.com/klauspost/compress v1.15.11 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.16 // indirect
+ github.com/mattn/go-isatty v0.0.17 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/mitchellh/cli v1.1.5 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
@@ -120,8 +104,10 @@ require (
github.com/mitchellh/hashstructure v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
+ github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/mount v0.3.3 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
+ github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mrunalp/fileutils v0.5.1 // indirect
@@ -131,7 +117,6 @@ require (
github.com/opencontainers/runc v1.1.12 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
github.com/opencontainers/selinux v1.10.1 // indirect
- github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/posener/complete v1.2.3 // indirect
@@ -140,8 +125,9 @@ require (
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
- github.com/shirou/gopsutil/v3 v3.23.1 // indirect
- github.com/shoenig/test v0.6.1 // indirect
+ github.com/shirou/gopsutil/v3 v3.23.4 // indirect
+ github.com/shoenig/go-m1cpu v0.1.5 // indirect
+ github.com/shoenig/test v0.6.3 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.3.1 // indirect
@@ -154,11 +140,8 @@ require (
github.com/vmihailenco/tagparser v0.1.1 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
- go.etcd.io/bbolt v1.3.7 // indirect
- go.opencensus.io v0.24.0 // indirect
- go.uber.org/atomic v1.9.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
- golang.org/x/exp v0.0.0-20220921164117-439092de6870 // indirect
+ golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sync v0.3.0 // indirect
@@ -166,7 +149,6 @@ require (
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/tools v0.6.0 // indirect
- google.golang.org/api v0.126.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/grpc v1.58.3 // indirect @@ -175,5 +157,4 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - oss.indeed.com/go/libtime v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 38e53ad9..e76769cb 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,3 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -9,44 +8,23 @@ cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 
h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -55,51 +33,28 @@ github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5/go.mod h1:nxQPcN github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.2.1 h1:n6EPaDyLSvCEa3frruQvAiHuNp2dhBlMSmkEr+HuzGc= github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio 
v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.9.10 h1:TxXGNmcbQxBKVWvjvTocNb6jrPyeHlk5EiDhhgHgggs= -github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= @@ -110,284 +65,125 @@ 
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.44.184 h1:/MggyE66rOImXJKl1HqhLQITvWvqIV7w1Q4MaG6FHUo= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/brianvoe/gofakeit/v6 v6.19.0 h1:g+yJ+meWVEsAmR+bV4mNM/eXI0N+0pZ3D+Mi+G5+YQo= github.com/brianvoe/gofakeit/v6 v6.19.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/container-storage-interface/spec v1.4.0 h1:ozAshSKxpJnYUfmkpZCTYyF/4MYeYlhdXbAvPvfGmkg= github.com/container-storage-interface/spec v1.4.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod 
h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod 
h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/containerd v1.6.26 h1:VVfrE6ZpyisvB1fzoY8Vkiq4sy+i5oF4uk7zu03RaHs= github.com/containerd/containerd v1.6.26/go.mod h1:I4TRdsdoo5MlKob5khDJS2EPT1l1oMNaE2MBm6FrwxM= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.7 h1:1yKpVCQAXI21BJIy8q7Nyk4CWpIgUno6ib7JIDca7D4= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= 
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= 
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= -github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.3+incompatible h1:9GhVsShNWz1hO//9BNg/dpMnZW25KydO4wtVxWAIbho= +github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 h1:GZvuJOpa10/Yl2EinacWoMqJ+XtNPbikclDZvNXBNO8= -github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/go-dockerclient v1.8.2 h1:8A2/xvHxpe5ivfQ2XDJ5EpLBTaxmsFZUJpfX1zSORRY= github.com/fsouza/go-dockerclient v1.8.2/go.mod h1:oenNB8JjNKY4o8I/sug4Qah9si/7OxH4MjL+u7oBxP8= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod 
h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -404,68 +200,45 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus v5.0.1+incompatible h1:fsDsnr/6MFSIm3kl6JJpq5pH+vO/rA5jUunalLnzSc8= github.com/godbus/dbus v5.0.1+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf 
v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gojuno/minimock/v3 v3.0.4/go.mod h1:HqeqnwV8mAABn3pO5hqF+RE7gjA0jsN8cbbSogoGrzI= -github.com/gojuno/minimock/v3 v3.0.6 h1:YqHcVR10x2ZvswPK8Ix5yk+hMpspdQ3ckSpkOzyF85I= -github.com/gojuno/minimock/v3 v3.0.6/go.mod h1:v61ZjAKHr+WnEkND63nQPCZ/DTfQgJdvbCi3IuoMblY= github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -473,11 +246,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= @@ -492,50 +261,31 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62 h1:72EUkkdM0uFQZVHpx69lM0bBqRhmtqsCV3Up48dfw2w= github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62/go.mod h1:oznME/M/L6XDklrE62H9R1Rp+WYtxrISywtwXpA+bgU= github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU= github.com/hashicorp/consul/api v1.15.3/go.mod h1:/g/qgcoBcEXALCNZgRRisyTW0nY86++L0KbeAMXYCeY= github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= -github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= -github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= 
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -543,49 +293,36 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-envparse v0.0.0-20180119215841-310ca1881b22 h1:HTmDIaSN95gbdMyrsbNiXSdW4fbGctGQwEqv0H7OhDQ= -github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= -github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.9 h1:ESiK220/qE0aGxWdzKIvRH69iLiuN/PjoLTm69RoWtU= 
-github.com/hashicorp/go-plugin v1.4.9/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= +github.com/hashicorp/go-plugin v1.4.10/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.4 h1:6ajbq64FhrIJZ6prrff3upVVDil4yfCrnSKwTH0HIPE= github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.4/go.mod h1:myX7XYMJRIP4PLHtYJiKMTJcKOX0M5ZJNwP0iw+l3uw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI= github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1/go.mod h1:Ch/bf00Qnx77MZd49JRgHYqHQjtEmTgGU2faufpVZb0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= @@ -604,8 +341,6 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -623,10 +358,8 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= 
-github.com/hashicorp/nomad v1.4.6 h1:28KdRPWy9S9LCEFKpvk53icv4he5d8bRTeRrg9R0IQU= -github.com/hashicorp/nomad v1.4.6/go.mod h1:Bv6eHWRLyqnViX/O6aqbHhtwVA3Rqq2IWSjmyKy3A/g= -github.com/hashicorp/nomad/api v0.0.0-20221006174558-2aa7e66bdb52 h1:O7oUpvlilRu3CWMiQtQF/pkF6ZUSWmjD0sn+T8oqewo= -github.com/hashicorp/nomad/api v0.0.0-20221006174558-2aa7e66bdb52/go.mod h1:1dS8jZqAXhEreBcb26wpaV4Llk2cLO2sucuDKI+oTIs= +github.com/hashicorp/nomad v1.4.11 h1:Zj379aYKsyFN6ZB+hX1GpYUNCZz8ytiK1PBpKnkSAc4= +github.com/hashicorp/nomad v1.4.11/go.mod h1:FiO0tdR+hySVATFJygYCVkiXxDBc3LkxkL0YMQZDTVA= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.11 h1:p3v6gf6l3S797NnK5av3HcczOC1T5CLoaRvg0g9ys4A= github.com/hashicorp/raft v1.3.11/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= @@ -636,18 +369,10 @@ github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pN github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/api v1.8.2 h1:C7OL9YtOtwQbTKI9ogB0A1wffRbCN+rH/LLCHO3d8HM= -github.com/hashicorp/vault/api v1.8.2/go.mod h1:ML8aYzBIhY5m1MD1B2Q0JV89cC85YVH4t5kBaZiyVaE= -github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 h1:HkaCmTKzcgLa2tjdiAid1rbmyQNmQGHfnmvIIM2WorY= -github.com/hashicorp/vault/api/auth/kubernetes v0.3.0/go.mod h1:l1B4MGtLc+P37MabBQiIhP3qd9agj0vqhETmaQjjC/Y= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= -github.com/hashicorp/vault/sdk v0.6.1 h1:sjZC1z4j5Rh2GXYbkxn5BLK05S1p7+MhW4AgdUmgRUA= -github.com/hashicorp/vault/sdk v0.6.1/go.mod h1:Ck4JuAC6usTphfrrRJCRH+7/N7O2ozZzkm/fzQFt4uM= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs= +github.com/hashicorp/vault/api v1.9.1 h1:LtY/I16+5jVGU8rufyyAkwopgq/HpUnxFBg+QLOAV38= +github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 h1:8as8OQ+RF1QrsHvWWsKBtBKINhD9QaD1iozA1wrO4aA= github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -655,44 +380,29 @@ github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE= -github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4= github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod 
h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -707,7 +417,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -715,7 +424,6 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -724,27 +432,21 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod 
h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= @@ -757,8 +459,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b h1:9+ke9YJ9KGWw5ANXK6ozjoK47uI3uNbXv4YVINBnGm8= github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -772,19 +472,17 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/mount 
v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -793,85 +491,42 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q= github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -882,47 +537,31 @@ github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXq github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= @@ -932,61 +571,47 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v3 v3.23.1 h1:a9KKO+kGLKEvcPIs4W62v0nu3sciVDOOOPUD0Hz7z/4= -github.com/shirou/gopsutil/v3 v3.23.1/go.mod h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA= -github.com/shoenig/test v0.6.1 h1:TVIih3yGvaH8Yci2OedB/NAhOC9UlNi5+ajCVyMPflg= -github.com/shoenig/test v0.6.1/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shirou/gopsutil/v3 v3.23.4 h1:hZwmDxZs7Ewt75DV81r4pFMqbq+di2cbt9FsQBqLD2o= +github.com/shirou/gopsutil/v3 v3.23.4/go.mod h1:ZcGxyfzAMRevhUR2+cfhXDH6gQdFYE/t8j1nsU4mPI8= +github.com/shoenig/go-m1cpu v0.1.5 h1:LF57Z/Fpb/WdGLjt2HZilNnmZOxg/q2bSKTQhgbrLrQ= +github.com/shoenig/go-m1cpu v0.1.5/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ= +github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus 
v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -996,34 +621,20 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns 
v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -1032,11 +643,6 @@ github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvC github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1044,54 +650,34 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= @@ -1105,8 +691,8 @@ golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220921164117-439092de6870 h1:j8b6j9gzSigH28O5SjSpQSSh9lFd6f5D/q0aHjNTulc= -golang.org/x/exp v0.0.0-20220921164117-439092de6870/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a h1:tlXy25amD5A7gOfbXdqCGN5k8ESEed/Ee1E5RcrYnqU= +golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1130,12 +716,10 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1148,14 +732,11 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1164,17 +745,13 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= @@ -1184,7 +761,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1211,75 +787,47 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1287,7 +835,7 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1302,12 +850,10 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1319,7 +865,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1349,14 +894,12 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -1364,8 +907,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1375,10 +916,7 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1387,14 +925,11 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1403,42 +938,26 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod 
h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1447,34 +966,23 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1482,21 +990,18 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1504,26 +1009,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1531,18 +1017,9 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod 
h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -oss.indeed.com/go/libtime v1.6.0 h1:XQyczJihse/wQGo59OfPF3f4f+Sywv4R8vdGB3S9BfU= -oss.indeed.com/go/libtime v1.6.0/go.mod h1:B2sdEcuzB0zhTKkAuHy4JInKRc7Al3tME4qWam6R7mA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go deleted file mode 100644 index 60292078..00000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ /dev/null @@ -1,160 +0,0 @@ -// +build windows - -package security - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -type ( - accessMask uint32 - accessMode uint32 - desiredAccess uint32 - inheritMode uint32 - objectType uint32 - shareMode uint32 - securityInformation uint32 - trusteeForm uint32 - trusteeType uint32 - - explicitAccess struct { - accessPermissions accessMask - accessMode accessMode - inheritance inheritMode - trustee trustee - } - - trustee struct { - multipleTrustee *trustee - multipleTrusteeOperation int32 - trusteeForm trusteeForm - trusteeType trusteeType - name uintptr - } -) - -const ( - accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ - - accessModeGrant accessMode = 1 - - desiredAccessReadControl desiredAccess = 0x20000 - desiredAccessWriteDac desiredAccess = 0x40000 - - gvmga = "GrantVmGroupAccess:" - - inheritModeNoInheritance inheritMode = 0x0 - inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 - - objectTypeFileObject objectType = 0x1 - - securityInformationDACL securityInformation = 0x4 - - shareModeRead shareMode = 0x1 - shareModeWrite shareMode = 0x2 - - sidVmGroup = "S-1-5-83-0" - - trusteeFormIsSid trusteeForm = 0 - - trusteeTypeWellKnownGroup trusteeType = 5 -) - -// GrantVMGroupAccess sets the DACL for a specified file or directory to -// include Grant ACE entries for the VM Group SID. This is a golang re- -// implementation of the same function in vmcompute, just not exported in -// RS5. Which kind of sucks. Sucks a lot :/ -func GrantVmGroupAccess(name string) error { - // Stat (to determine if `name` is a directory). 
- s, err := os.Stat(name) - if err != nil { - return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) - } - - // Get a handle to the file/directory. Must defer Close on success. - fd, err := createFile(name, s.IsDir()) - if err != nil { - return err // Already wrapped - } - defer syscall.CloseHandle(fd) - - // Get the current DACL and Security Descriptor. Must defer LocalFree on success. - ot := objectTypeFileObject - si := securityInformationDACL - sd := uintptr(0) - origDACL := uintptr(0) - if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { - return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) - } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) - - // Generate a new DACL which is the current DACL with the required ACEs added. - // Must defer LocalFree on success. - newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) - if err != nil { - return err // Already wrapped - } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) - - // And finally use SetSecurityInfo to apply the updated DACL. - if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { - return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) - } - - return nil -} - -// createFile is a helper function to call [Nt]CreateFile to get a handle to -// the file or directory. -func createFile(name string, isDir bool) (syscall.Handle, error) { - namep := syscall.StringToUTF16(name) - da := uint32(desiredAccessReadControl | desiredAccessWriteDac) - sm := uint32(shareModeRead | shareModeWrite) - fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) - if isDir { - fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) - } - fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) - if err != nil { - return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) - } - return fd, nil -} - -// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. -// The caller is responsible for LocalFree of the returned DACL on success. 
-func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { - // Generate pointers to the SIDs based on the string SIDs - sid, err := syscall.StringToSid(sidVmGroup) - if err != nil { - return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) - } - - inheritance := inheritModeNoInheritance - if isDir { - inheritance = inheritModeSubContainersAndObjectsInherit - } - - eaArray := []explicitAccess{ - explicitAccess{ - accessPermissions: accessMaskDesiredPermission, - accessMode: accessModeGrant, - inheritance: inheritance, - trustee: trustee{ - trusteeForm: trusteeFormIsSid, - trusteeType: trusteeTypeWellKnownGroup, - name: uintptr(unsafe.Pointer(sid)), - }, - }, - } - - modifiedDACL := uintptr(0) - if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) - } - - return modifiedDACL, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go deleted file mode 100644 index d7096716..00000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package security - -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go - -//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo -//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo -//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go deleted file mode 100644 index 4084680e..00000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package security - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") -) - -func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go deleted file mode 100644 index f7f78fc2..00000000 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ /dev/null @@ -1,350 +0,0 @@ -//go:build windows -// +build windows - -package vhd - -import ( - "fmt" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go - -//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk -//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk -//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath - -type ( - CreateVirtualDiskFlag uint32 - VirtualDiskFlag uint32 - AttachVirtualDiskFlag uint32 - DetachVirtualDiskFlag uint32 - VirtualDiskAccessMask uint32 -) - -type VirtualStorageType struct { - DeviceID uint32 - VendorID guid.GUID -} - -type CreateVersion2 struct { - UniqueID guid.GUID - MaximumSize uint64 - 
BlockSizeInBytes uint32 - SectorSizeInBytes uint32 - PhysicalSectorSizeInByte uint32 - ParentPath *uint16 // string - SourcePath *uint16 // string - OpenFlags uint32 - ParentVirtualStorageType VirtualStorageType - SourceVirtualStorageType VirtualStorageType - ResiliencyGUID guid.GUID -} - -type CreateVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 CreateVersion2 -} - -type OpenVersion2 struct { - GetInfoOnly bool - ReadOnly bool - ResiliencyGUID guid.GUID -} - -type OpenVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 OpenVersion2 -} - -// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, -// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating -// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. -type openVersion2 struct { - getInfoOnly int32 - readOnly int32 - resiliencyGUID guid.GUID -} - -type openVirtualDiskParameters struct { - version uint32 - version2 openVersion2 -} - -type AttachVersion2 struct { - RestrictedOffset uint64 - RestrictedLength uint64 -} - -type AttachVirtualDiskParameters struct { - Version uint32 - Version2 AttachVersion2 -} - -const ( - VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 - - // Access Mask for opening a VHD - VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 - VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 - VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 - VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 - VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 - VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 - VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 - VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 - VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 - VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 - - // Flags for creating a VHD - CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 - CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 - CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 - CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 - CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 - CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 - CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 - CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 - CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 - CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 - CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 - - // Flags for opening a VHD - OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 - OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 - OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 - OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 - OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 - OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 - OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 - OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 - OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 - OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 - 
OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 - - // Flags for attaching a VHD - AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 - AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 - AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 - AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 - AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 - AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 - AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 - AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 - AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 - AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 - AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 - - // Flags for detaching a VHD - DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 -) - -// CreateVhdx is a helper function to create a simple vhdx file at the given path using -// default values. -func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { - params := CreateVirtualDiskParameters{ - Version: 2, - Version2: CreateVersion2{ - MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, - BlockSizeInBytes: blockSizeInMb * 1024 * 1024, - }, - } - - handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) - if err != nil { - return err - } - - return syscall.CloseHandle(handle) -} - -// DetachVirtualDisk detaches a virtual hard disk by handle. -func DetachVirtualDisk(handle syscall.Handle) (err error) { - if err := detachVirtualDisk(handle, 0, 0); err != nil { - return fmt.Errorf("failed to detach virtual disk: %w", err) - } - return nil -} - -// DetachVhd detaches a vhd found at `path`. -func DetachVhd(path string) error { - handle, err := OpenVirtualDisk( - path, - VirtualDiskAccessNone, - OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, - ) - if err != nil { - return err - } - defer syscall.CloseHandle(handle) - return DetachVirtualDisk(handle) -} - -// AttachVirtualDisk attaches a virtual hard disk for use. -func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { - // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. - if err := attachVirtualDisk( - handle, - nil, - uint32(attachVirtualDiskFlag), - 0, - parameters, - nil, - ); err != nil { - return fmt.Errorf("failed to attach virtual disk: %w", err) - } - return nil -} - -// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 -// of the ATTACH_VIRTUAL_DISK_PARAMETERS. -func AttachVhd(path string) (err error) { - handle, err := OpenVirtualDisk( - path, - VirtualDiskAccessNone, - OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, - ) - if err != nil { - return err - } - - defer syscall.CloseHandle(handle) - params := AttachVirtualDiskParameters{Version: 2} - if err := AttachVirtualDisk( - handle, - AttachVirtualDiskFlagNone, - ¶ms, - ); err != nil { - return fmt.Errorf("failed to attach virtual disk: %w", err) - } - return nil -} - -// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. 
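
The vendored copy of go-winio's `vhd` package removed above lives upstream at `github.com/Microsoft/go-winio/vhd`; as a hedged illustration only (not part of this dependency bump), a minimal Windows-only sketch of how a caller might use the `CreateVhdx`, `AttachVhd` and `DetachVhd` helpers shown above — the file path and sizes here are hypothetical:

```go
//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	const path = `C:\temp\scratch.vhdx` // hypothetical path, for illustration only

	// Create a 20 GB VHDX with a 1 MB block size using the package's default
	// version-2 CreateVirtualDiskParameters (per CreateVhdx above).
	if err := vhd.CreateVhdx(path, 20, 1); err != nil {
		log.Fatalf("create vhdx: %v", err)
	}

	// Attach the disk (version 2 attach parameters, no flags), then detach it again.
	if err := vhd.AttachVhd(path); err != nil {
		log.Fatalf("attach vhd: %v", err)
	}
	if err := vhd.DetachVhd(path); err != nil {
		log.Fatalf("detach vhd: %v", err)
	}
}
```
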
-func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { - parameters := OpenVirtualDiskParameters{Version: 2} - handle, err := OpenVirtualDiskWithParameters( - vhdPath, - virtualDiskAccessMask, - openVirtualDiskFlags, - ¶meters, - ) - if err != nil { - return 0, err - } - return handle, nil -} - -// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. -func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { - var ( - handle syscall.Handle - defaultType VirtualStorageType - getInfoOnly int32 - readOnly int32 - ) - if parameters.Version != 2 { - return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) - } - if parameters.Version2.GetInfoOnly { - getInfoOnly = 1 - } - if parameters.Version2.ReadOnly { - readOnly = 1 - } - params := &openVirtualDiskParameters{ - version: parameters.Version, - version2: openVersion2{ - getInfoOnly, - readOnly, - parameters.Version2.ResiliencyGUID, - }, - } - if err := openVirtualDisk( - &defaultType, - vhdPath, - uint32(virtualDiskAccessMask), - uint32(openVirtualDiskFlags), - params, - &handle, - ); err != nil { - return 0, fmt.Errorf("failed to open virtual disk: %w", err) - } - return handle, nil -} - -// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. -func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { - var ( - handle syscall.Handle - defaultType VirtualStorageType - ) - if parameters.Version != 2 { - return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) - } - - if err := createVirtualDisk( - &defaultType, - path, - uint32(virtualDiskAccessMask), - nil, - uint32(createVirtualDiskFlags), - 0, - parameters, - nil, - &handle, - ); err != nil { - return handle, fmt.Errorf("failed to create virtual disk: %w", err) - } - return handle, nil -} - -// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical -// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer -// that represents the particular enumeration of the physical disk on the caller's system. -func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { - var ( - diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars - diskPhysicalPathBuf [256]uint16 - ) - if err := getVirtualDiskPhysicalPath( - handle, - &diskPathSizeInBytes, - &diskPhysicalPathBuf[0], - ); err != nil { - return "", fmt.Errorf("failed to get disk physical path: %w", err) - } - return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil -} - -// CreateDiffVhd is a helper function to create a differencing virtual disk. -func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { - // Setting `ParentPath` is how to signal to create a differencing disk. 
- createParams := &CreateVirtualDiskParameters{ - Version: 2, - Version2: CreateVersion2{ - ParentPath: windows.StringToUTF16Ptr(baseVhdPath), - BlockSizeInBytes: blockSizeInMB * 1024 * 1024, - OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), - }, - } - - vhdHandle, err := CreateVirtualDisk( - diffVhdPath, - VirtualDiskAccessNone, - CreateVirtualDiskFlagNone, - createParams, - ) - if err != nil { - return fmt.Errorf("failed to create differencing vhd: %w", err) - } - if err := syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %w", err) - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go deleted file mode 100644 index 1d7498db..00000000 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ /dev/null @@ -1,106 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package vhd - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") - - procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") - procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") - procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") - procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") - procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") -) - -func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { - r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { - var _p0 *uint16 - _p0, win32err = syscall.UTF16PtrFromString(path) - if win32err != nil { - return - } - return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) -} - -func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err 
error) { - r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { - r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { - r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} - -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { - var _p0 *uint16 - _p0, win32err = syscall.UTF16PtrFromString(path) - if win32err != nil { - return - } - return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) -} - -func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { - r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r0 != 0 { - win32err = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes deleted file mode 100644 index 94f480de..00000000 --- a/vendor/github.com/Microsoft/hcsshim/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore deleted file mode 100644 index 54ed6f06..00000000 --- a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Ignore vscode setting files -.vscode/ - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -# Ignore gcs bin directory -service/bin/ -service/pkg/ - -*.img -*.vhd -*.tar.gz - -# Make stuff -.rootfs-done -bin/* -rootfs/* -*.o -/build/ - -deps/* -out/* - -.idea/ -.vscode/ \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml deleted file mode 100644 index 2400e7f1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ /dev/null @@ -1,99 +0,0 @@ -run: - timeout: 8m - -linters: - enable: - - stylecheck - 
-linters-settings: - stylecheck: - # https://staticcheck.io/docs/checks - checks: ["all"] - - -issues: - # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like - # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go - # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't - # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, - # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. - exclude-rules: - - path: layer.go - linters: - - stylecheck - Text: "ST1003:" - - - path: hcsshim.go - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hcs\\schema2\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\wclayer\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: hcn\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hcs\\schema1\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hns\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: ext4\\internal\\compactext4\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: ext4\\internal\\format\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\guestrequest\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\guest\\prot\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\windevice\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\winapi\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\vmcompute\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\regstate\\ - linters: - - stylecheck - Text: "ST1003:" - - - path: internal\\hcserror\\ - linters: - - stylecheck - Text: "ST1003:" \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS deleted file mode 100644 index f4c5a07d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @microsoft/containerplat \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE deleted file mode 100644 index 49d21669..00000000 --- a/vendor/github.com/Microsoft/hcsshim/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile deleted file mode 100644 index a8f5516c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ /dev/null @@ -1,87 +0,0 @@ -BASE:=base.tar.gz - -GO:=go -GO_FLAGS:=-ldflags "-s -w" # strip Go binaries -CGO_ENABLED:=0 -GOMODVENDOR:= - -CFLAGS:=-O2 -Wall -LDFLAGS:=-static -s # strip C binaries - -GO_FLAGS_EXTRA:= -ifeq "$(GOMODVENDOR)" "1" -GO_FLAGS_EXTRA += -mod=vendor -endif -GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) - -SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) - -# The link aliases for gcstools -GCS_TOOLS=\ - generichook - -.PHONY: all always rootfs test - -all: out/initrd.img out/rootfs.tar.gz - -clean: - find -name '*.o' -print0 | xargs -0 -r rm - rm -rf bin deps rootfs out - -test: - cd $(SRCROOT) && go test -v ./internal/guest/... - -out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile - @mkdir -p out - rm -rf rootfs - mkdir -p rootfs/bin/ - cp bin/init rootfs/ - cp bin/vsockexec rootfs/bin/ - cp bin/cmd/gcs rootfs/bin/ - cp bin/cmd/gcstools rootfs/bin/ - for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done - git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ - git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch - tar -zcf $@ -C rootfs . - rm -rf rootfs - -out/rootfs.tar.gz: out/initrd.img - rm -rf rootfs-conv - mkdir rootfs-conv - gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) - tar -zcf $@ -C rootfs-conv . - rm -rf rootfs-conv - -out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh - $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed - gzip -c out/initrd.img.uncompressed > $@ - rm out/initrd.img.uncompressed - --include deps/cmd/gcs.gomake --include deps/cmd/gcstools.gomake - -# Implicit rule for includes that define Go targets. -%.gomake: $(SRCROOT)/Makefile - @mkdir -p $(dir $@) - @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new - @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new - @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new - @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new - @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new - @/bin/echo -e '\tmv $$@.new $$@' >> $@.new - @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new - mv $@.new $@ - -VPATH=$(SRCROOT) - -bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o - @mkdir -p bin - $(CC) $(LDFLAGS) -o $@ $^ - -bin/init: init/init.o vsockexec/vsock.o - @mkdir -p bin - $(CC) $(LDFLAGS) -o $@ $^ - -%.o: %.c - @mkdir -p $(dir $@) - $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml deleted file mode 100644 index ee18671a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml +++ /dev/null @@ -1,49 +0,0 @@ -version = "unstable" -generator = "gogoctrd" -plugins = ["grpc", "fieldpath"] - -# Control protoc include paths. 
Below are usually some good defaults, but feel -# free to try it without them if it works for your project. -[includes] - # Include paths that will be added before all others. Typically, you want to - # treat the root of the project as an include, but this may not be necessary. - before = ["./protobuf"] - - # Paths that should be treated as include roots in relation to the vendor - # directory. These will be calculated with the vendor directory nearest the - # target package. - packages = ["github.com/gogo/protobuf"] - - # Paths that will be added untouched to the end of the includes. We use - # `/usr/local/include` to pickup the common install location of protobuf. - # This is the default. - after = ["/usr/local/include"] - -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. -[packages] - "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" - "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1" - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"] -plugins = ["ttrpc"] \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md deleted file mode 100644 index b8ca926a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ /dev/null @@ -1,120 +0,0 @@ -# hcsshim - -[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) - -This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers. - -It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well. - -## Building - -While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. 
The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). -### Linux Hyper-V Container Guest Agent - -To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. -```powershell -C:\> $env:GOOS="linux" -C:\> go build .\cmd\gcs\ -``` - -or on a Linux machine -```sh -> go build ./cmd/gcs -``` - -If you want it to be packaged inside of a rootfs to boot with alongside all of the other tools then you'll need to provide a rootfs that it can be packaged inside of. An easy way is to export the rootfs of a container. - -```sh -docker pull busybox -docker run --name base_image_container busybox -docker export base_image_container | gzip > base.tar.gz -BASE=./base.tar.gz -make all -``` - -If the build is successful, in the `./out` folder you should see: -```sh -> ls ./out/ -delta.tar.gz initrd.img rootfs.tar.gz -``` - -### Containerd Shim -For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md. - -Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. - -```powershell -C:\> $env:GOOS="windows" -C:\> go build .\cmd\containerd-shim-runhcs-v1 -``` - -Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running: -```powershell -.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii -``` - -This config file will already have the shim set as the default runtime for cri interactions. - -To trial using the shim out with ctr.exe: -```powershell -C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!" -``` - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to -certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for -more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure -that all commits in a given PR are signed-off. - -### Test Directory (Important to note) - -This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this -project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has -its own go.mod file where these are now included to get around this issue. 
Our tests rely on the code in this project to run, so the test Go modules file -has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project -(which is the repo itself on your disk). - -``` -replace ( - github.com/Microsoft/hcsshim => ../ -) -``` - -Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the -CI in this project will check if the files are out of date and will fail if this is true. - - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Dependencies - -This project requires Golang 1.9 or newer to build. - -For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). - -## Reporting Security Issues - -Security issues and bugs should be reported privately, via email, to the Microsoft Security -Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should -receive a response within 24 hours. If for some reason you do not, please follow up via -email to ensure we received your original message. Further information, including the -[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in -the [Security TechCenter](https://technet.microsoft.com/en-us/security/default). - -For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet - ---------------- -Copyright (c) 2018 Microsoft Corp. All rights reserved. diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go deleted file mode 100644 index 7f1f2823..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go +++ /dev/null @@ -1,38 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// AttachLayerStorageFilter sets up the layer storage filter on a writable -// container layer. -// -// `layerPath` is a path to a directory the writable layer is mounted. If the -// path does not end in a `\` the platform will append it automatically. -// -// `layerData` is the parent read-only layer data. 
-func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim.AttachLayerStorageFilter" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - err = hcsAttachLayerStorageFilter(layerPath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to attach layer storage filter") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go deleted file mode 100644 index 8e28e6c5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go +++ /dev/null @@ -1,26 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// DestroyLayer deletes a container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -func DestroyLayer(ctx context.Context, layerPath string) (err error) { - title := "hcsshim.DestroyLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) - - err = hcsDestroyLayer(layerPath) - if err != nil { - return errors.Wrap(err, "failed to destroy layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go deleted file mode 100644 index 43547325..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go +++ /dev/null @@ -1,26 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { - title := "hcsshim.DetachLayerStorageFilter" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) - - err = hcsDetachLayerStorageFilter(layerPath) - if err != nil { - return errors.Wrap(err, "failed to detach layer storage filter") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go deleted file mode 100644 index a1b12dd1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go +++ /dev/null @@ -1,46 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// ExportLayer exports a container layer. -// -// `layerPath` is a path to a directory containing the layer to export. -// -// `exportFolderPath` is a pre-existing folder to export the layer to. -// -// `layerData` is the parent layer data. -// -// `options` are the export options applied to the exported layer. 
-func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { - title := "hcsshim.ExportLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("exportFolderPath", exportFolderPath), - ) - - ldbytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - obytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) - if err != nil { - return errors.Wrap(err, "failed to export layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go deleted file mode 100644 index 83c0fa33..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go +++ /dev/null @@ -1,26 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. -// -// If the VHD is not mounted it will be temporarily mounted. -func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { - title := "hcsshim.FormatWritableLayerVhd" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - err = hcsFormatWritableLayerVhd(vhdHandle) - if err != nil { - return errors.Wrap(err, "failed to format writable layer vhd") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go deleted file mode 100644 index 87fee452..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go +++ /dev/null @@ -1,193 +0,0 @@ -package computestorage - -import ( - "context" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio/pkg/security" - "github.com/Microsoft/go-winio/vhd" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -const defaultVHDXBlockSizeInMB = 1 - -// SetupContainerBaseLayer is a helper to setup a containers scratch. It -// will create and format the vhdx's inside and the size is configurable with the sizeInGB -// parameter. -// -// `layerPath` is the path to the base container layer on disk. -// -// `baseVhdPath` is the path to where the base vhdx for the base layer should be created. -// -// `diffVhdPath` is the path where the differencing disk for the base layer should be created. -// -// `sizeInGB` is the size in gigabytes to make the base vhdx. -func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { - var ( - hivesPath = filepath.Join(layerPath, "Hives") - layoutPath = filepath.Join(layerPath, "Layout") - ) - - // We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files - // already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and - // differencing disks if they exist in case we're asking for a different size. 
- if _, err := os.Stat(hivesPath); err == nil { - if err := os.RemoveAll(hivesPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting hives directory") - } - } - if _, err := os.Stat(layoutPath); err == nil { - if err := os.RemoveAll(layoutPath); err != nil { - return errors.Wrap(err, "failed to remove prexisting layout file") - } - } - - if _, err := os.Stat(baseVhdPath); err == nil { - if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx path") - } - } - if _, err := os.Stat(diffVhdPath); err == nil { - if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") - } - } - - createParams := &vhd.CreateVirtualDiskParameters{ - Version: 2, - Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * 1024 * 1024 * 1024, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, - }, - } - handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) - if err != nil { - return errors.Wrap(err, "failed to create vhdx") - } - - defer func() { - if err != nil { - _ = syscall.CloseHandle(handle) - os.RemoveAll(baseVhdPath) - os.RemoveAll(diffVhdPath) - } - }() - - if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil { - return err - } - // Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer - if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") - } - - options := OsLayerOptions{ - Type: OsLayerTypeContainer, - } - - // SetupBaseOSLayer expects an empty vhd handle for a container layer and will - // error out otherwise. - if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil { - return err - } - // Create the differencing disk that will be what's copied for the final rw layer - // for a container. - if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") - } - - if err = security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) - } - if err = security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) - } - return nil -} - -// SetupUtilityVMBaseLayer is a helper to setup a UVMs scratch space. It will create and format -// the vhdx inside and the size is configurable by the sizeInGB parameter. -// -// `uvmPath` is the path to the UtilityVM filesystem. -// -// `baseVhdPath` is the path to where the base vhdx for the UVM should be created. -// -// `diffVhdPath` is the path where the differencing disk for the UVM should be created. -// -// `sizeInGB` specifies the size in gigabytes to make the base vhdx. -func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) { - // Remove the base and differencing disks if they exist in case we're asking for a different size. - if _, err := os.Stat(baseVhdPath); err == nil { - if err := os.RemoveAll(baseVhdPath); err != nil { - return errors.Wrap(err, "failed to remove base vhdx") - } - } - if _, err := os.Stat(diffVhdPath); err == nil { - if err := os.RemoveAll(diffVhdPath); err != nil { - return errors.Wrap(err, "failed to remove differencing vhdx") - } - } - - // Just create the vhdx for utilityVM layer, no need to format it. 
- createParams := &vhd.CreateVirtualDiskParameters{ - Version: 2, - Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * 1024 * 1024 * 1024, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, - }, - } - handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) - if err != nil { - return errors.Wrap(err, "failed to create vhdx") - } - - defer func() { - if err != nil { - _ = syscall.CloseHandle(handle) - os.RemoveAll(baseVhdPath) - os.RemoveAll(diffVhdPath) - } - }() - - // If it is a UtilityVM layer then the base vhdx must be attached when calling - // `SetupBaseOSLayer` - attachParams := &vhd.AttachVirtualDiskParameters{ - Version: 2, - } - if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { - return errors.Wrapf(err, "failed to attach virtual disk") - } - - options := OsLayerOptions{ - Type: OsLayerTypeVM, - } - if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { - return err - } - - // Detach and close the handle after setting up the layer as we don't need the handle - // for anything else and we no longer need to be attached either. - if err = vhd.DetachVirtualDisk(handle); err != nil { - return errors.Wrap(err, "failed to detach vhdx") - } - if err = syscall.CloseHandle(handle); err != nil { - return errors.Wrap(err, "failed to close vhdx handle") - } - - // Create the differencing disk that will be what's copied for the final rw layer - // for a container. - if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { - return errors.Wrap(err, "failed to create differencing disk") - } - - if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) - } - if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { - return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go deleted file mode 100644 index 0c61dab3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go +++ /dev/null @@ -1,41 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// ImportLayer imports a container layer. -// -// `layerPath` is a path to a directory to import the layer to. If the directory -// does not exist it will be automatically created. -// -// `sourceFolderpath` is a pre-existing folder that contains the layer to -// import. -// -// `layerData` is the parent layer data. 
-func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { - title := "hcsshim.ImportLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("sourceFolderPath", sourceFolderPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to import layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go deleted file mode 100644 index 53ed8ea6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go +++ /dev/null @@ -1,38 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" -) - -// InitializeWritableLayer initializes a writable layer for a container. -// -// `layerPath` is a path to a directory the layer is mounted. If the -// path does not end in a `\` the platform will append it automatically. -// -// `layerData` is the parent read-only layer data. -func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim.InitializeWritableLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(layerData) - if err != nil { - return err - } - - // Options are not used in the platform as of RS5 - err = hcsInitializeWritableLayer(layerPath, string(bytes), "") - if err != nil { - return errors.Wrap(err, "failed to intitialize container layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go deleted file mode 100644 index fcdbbef8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go +++ /dev/null @@ -1,27 +0,0 @@ -package computestorage - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. 
-func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { - title := "hcsshim.GetLayerVhdMountPath" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - var mountPath *uint16 - err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath) - if err != nil { - return "", errors.Wrap(err, "failed to get vhd mount path") - } - path = interop.ConvertAndFreeCoTaskMemString(mountPath) - return path, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go deleted file mode 100644 index 06aaf841..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go +++ /dev/null @@ -1,74 +0,0 @@ -package computestorage - -import ( - "context" - "encoding/json" - - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "github.com/pkg/errors" - "go.opencensus.io/trace" - "golang.org/x/sys/windows" -) - -// SetupBaseOSLayer sets up a layer that contains a base OS for a container. -// -// `layerPath` is a path to a directory containing the layer. -// -// `vhdHandle` is an empty file handle of `options.Type == OsLayerTypeContainer` -// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type -// == OsLayerTypeVm`. -// -// `options` are the options applied while processing the layer. -func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { - title := "hcsshim.SetupBaseOSLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - ) - - bytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") - } - return nil -} - -// SetupBaseOSVolume sets up a volume that contains a base OS for a container. -// -// `layerPath` is a path to a directory containing the layer. -// -// `volumePath` is the path to the volume to be used for setup. -// -// `options` are the options applied while processing the layer. -func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { - if osversion.Build() < 19645 { - return errors.New("SetupBaseOSVolume is not present on builds older than 19645") - } - title := "hcsshim.SetupBaseOSVolume" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("layerPath", layerPath), - trace.StringAttribute("volumePath", volumePath), - ) - - bytes, err := json.Marshal(options) - if err != nil { - return err - } - - err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes)) - if err != nil { - return errors.Wrap(err, "failed to setup base OS layer") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go deleted file mode 100644 index 95aff9c1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package computestorage is a wrapper around the HCS storage APIs. 
These are new storage APIs introduced -// separate from the original graphdriver calls intended to give more freedom around creating -// and managing container layers and scratch spaces. -package computestorage - -import ( - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go - -//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? -//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? -//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? -//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? -//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? -//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? -//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? -//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? -//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? -//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? - -// LayerData is the data used to describe parent layer information. -type LayerData struct { - SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` - Layers []hcsschema.Layer `json:"Layers,omitempty"` -} - -// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. -type ExportLayerOptions struct { - IsWritableLayer bool `json:"IsWritableLayer,omitempty"` -} - -// OsLayerType is the type of layer being operated on. -type OsLayerType string - -const ( - // OsLayerTypeContainer is a container layer. - OsLayerTypeContainer OsLayerType = "Container" - // OsLayerTypeVM is a virtual machine layer. - OsLayerTypeVM OsLayerType = "Vm" -) - -// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and -// `SetupBaseOSVolume` calls. -type OsLayerOptions struct { - Type OsLayerType `json:"Type,omitempty"` - DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` - SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go deleted file mode 100644 index 4f951806..00000000 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ /dev/null @@ -1,319 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package computestorage - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. 
-func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") - - procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") - procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") - procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") - procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") - procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") - procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") - procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") - procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") - procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") - procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") -) - -func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - return _hcsImportLayer(_p0, _p1, _p2) -} - -func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { - if hr = procHcsImportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - var _p3 *uint16 - _p3, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsExportLayer(_p0, _p1, _p2, _p3) -} - -func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { - if hr = procHcsExportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsDestroyLayer(layerPath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - return _hcsDestroyLayer(_p0) -} - -func _hcsDestroyLayer(layerPath *uint16) (hr error) { - if hr = procHcsDestoryLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) 
- if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSetupBaseOSLayer(_p0, handle, _p1) -} - -func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { - if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsInitializeWritableLayer(_p0, _p1, _p2) -} - -func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { - if hr = procHcsInitializeWritableLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(layerData) - if hr != nil { - return - } - return _hcsAttachLayerStorageFilter(_p0, _p1) -} - -func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { - if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsDetachLayerStorageFilter(layerPath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - return _hcsDetachLayerStorageFilter(_p0) -} - -func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { - if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { - if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func 
hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { - if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(volumePath) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSetupBaseOSVolume(_p0, _p1, _p2) -} - -func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { - if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go deleted file mode 100644 index bfd72289..00000000 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ /dev/null @@ -1,223 +0,0 @@ -package hcsshim - -import ( - "context" - "fmt" - "os" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - "github.com/Microsoft/hcsshim/internal/mergemaps" -) - -// ContainerProperties holds the properties for a container and the processes running in that container -type ContainerProperties = schema1.ContainerProperties - -// MemoryStats holds the memory statistics for a container -type MemoryStats = schema1.MemoryStats - -// ProcessorStats holds the processor statistics for a container -type ProcessorStats = schema1.ProcessorStats - -// StorageStats holds the storage statistics for a container -type StorageStats = schema1.StorageStats - -// NetworkStats holds the network statistics for a container -type NetworkStats = schema1.NetworkStats - -// Statistics is the structure returned by a statistics call on a container -type Statistics = schema1.Statistics - -// ProcessList is the structure of an item returned by a ProcessList call on a container -type ProcessListItem = schema1.ProcessListItem - -// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container -type MappedVirtualDiskController = schema1.MappedVirtualDiskController - -// Type of Request Support in ModifySystem -type RequestType = schema1.RequestType - -// Type of Resource Support in ModifySystem -type ResourceType = schema1.ResourceType - -// RequestType const -const ( - Add = schema1.Add - Remove = schema1.Remove - Network = schema1.Network -) - -// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse - -type container struct { - system *hcs.System - waitOnce sync.Once - waitErr error - waitCh chan struct{} -} - -// createComputeSystemAdditionalJSON is read from the environment 
at initialisation -// time. It allows an environment variable to define additional JSON which -// is merged in the CreateComputeSystem call to HCS. -var createContainerAdditionalJSON []byte - -func init() { - createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")) -} - -// CreateContainer creates a new container with the given configuration but does not start it. -func CreateContainer(id string, c *ContainerConfig) (Container, error) { - fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) - if err != nil { - return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err) - } - - system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) - if err != nil { - return nil, err - } - return &container{system: system}, err -} - -// OpenContainer opens an existing container by ID. -func OpenContainer(id string) (Container, error) { - system, err := hcs.OpenComputeSystem(context.Background(), id) - if err != nil { - return nil, err - } - return &container{system: system}, err -} - -// GetContainers gets a list of the containers on the system that match the query -func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) { - return hcs.GetComputeSystems(context.Background(), q) -} - -// Start synchronously starts the container. -func (container *container) Start() error { - return convertSystemError(container.system.Start(context.Background()), container) -} - -// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. -func (container *container) Shutdown() error { - err := container.system.Shutdown(context.Background()) - if err != nil { - return convertSystemError(err, container) - } - return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"} -} - -// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. -func (container *container) Terminate() error { - err := container.system.Terminate(context.Background()) - if err != nil { - return convertSystemError(err, container) - } - return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"} -} - -// Waits synchronously waits for the container to shutdown or terminate. -func (container *container) Wait() error { - err := container.system.Wait() - if err == nil { - err = container.system.ExitError() - } - return convertSystemError(err, container) -} - -// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It -// returns false if timeout occurs. -func (container *container) WaitTimeout(timeout time.Duration) error { - container.waitOnce.Do(func() { - container.waitCh = make(chan struct{}) - go func() { - container.waitErr = container.Wait() - close(container.waitCh) - }() - }) - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-t.C: - return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"} - case <-container.waitCh: - return container.waitErr - } -} - -// Pause pauses the execution of a container. -func (container *container) Pause() error { - return convertSystemError(container.system.Pause(context.Background()), container) -} - -// Resume resumes the execution of a container. 
-func (container *container) Resume() error { - return convertSystemError(container.system.Resume(context.Background()), container) -} - -// HasPendingUpdates returns true if the container has updates pending to install -func (container *container) HasPendingUpdates() (bool, error) { - return false, nil -} - -// Statistics returns statistics for the container. This is a legacy v1 call -func (container *container) Statistics() (Statistics, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics) - if err != nil { - return Statistics{}, convertSystemError(err, container) - } - - return properties.Statistics, nil -} - -// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call -func (container *container) ProcessList() ([]ProcessListItem, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList) - if err != nil { - return nil, convertSystemError(err, container) - } - - return properties.ProcessList, nil -} - -// This is a legacy v1 call -func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) { - properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk) - if err != nil { - return nil, convertSystemError(err, container) - } - - return properties.MappedVirtualDiskControllers, nil -} - -// CreateProcess launches a new process within the container. -func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { - p, err := container.system.CreateProcess(context.Background(), c) - if err != nil { - return nil, convertSystemError(err, container) - } - return &process{p: p.(*hcs.Process)}, nil -} - -// OpenProcess gets an interface to an existing process within the container. -func (container *container) OpenProcess(pid int) (Process, error) { - p, err := container.system.OpenProcess(context.Background(), pid) - if err != nil { - return nil, convertSystemError(err, container) - } - return &process{p: p}, nil -} - -// Close cleans up any state associated with the container but does not terminate or wait for it. 
-func (container *container) Close() error { - return convertSystemError(container.system.Close(), container) -} - -// Modify the System -func (container *container) Modify(config *ResourceModificationRequestResponse) error { - return convertSystemError(container.system.Modify(context.Background(), config), container) -} diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go deleted file mode 100644 index f367022e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ /dev/null @@ -1,245 +0,0 @@ -package hcsshim - -import ( - "fmt" - "syscall" - - "github.com/Microsoft/hcsshim/internal/hns" - - "github.com/Microsoft/hcsshim/internal/hcs" - "github.com/Microsoft/hcsshim/internal/hcserror" -) - -var ( - // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists = hcs.exist - ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrElementNotFound = hcs.ErrElementNotFound - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrNotSupported = hcs.ErrNotSupported - - // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported - // decimal -2147024883 / hex 0x8007000d - ErrInvalidData = hcs.ErrInvalidData - - // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed - ErrHandleClose = hcs.ErrHandleClose - - // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method - ErrAlreadyClosed = hcs.ErrAlreadyClosed - - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = hcs.ErrInvalidNotificationType - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = hcs.ErrInvalidProcessState - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = hcs.ErrTimeout - - // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit - - // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort - - // ErrUnexpectedValue is an error encountered when hcs returns an invalid value - ErrUnexpectedValue = hcs.ErrUnexpectedValue - - // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container - ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped - - // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously - ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending - - // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation - ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState - - // ErrProcNotFound is an error encountered when a procedure look up fails. 
- ErrProcNotFound = hcs.ErrProcNotFound - - // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 - // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. - ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied - - // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management - ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON - - // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message - ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage - - // ErrNotSupported is an error encountered when hcs doesn't support the request - ErrPlatformNotSupported = hcs.ErrPlatformNotSupported -) - -type EndpointNotFoundError = hns.EndpointNotFoundError -type NetworkNotFoundError = hns.NetworkNotFoundError - -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - Process *process - Operation string - Err error - Events []hcs.ErrorEvent -} - -// ContainerError is an error encountered in HCS during an operation on a Container object -type ContainerError struct { - Container *container - Operation string - Err error - Events []hcs.ErrorEvent -} - -func (e *ContainerError) Error() string { - if e == nil { - return "" - } - - if e.Container == nil { - return "unexpected nil container for error: " + e.Err.Error() - } - - s := "container " + e.Container.system.ID() - - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - switch e.Err.(type) { - case nil: - break - case syscall.Errno: - s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) - default: - s += fmt.Sprintf(": %s", e.Err.Error()) - } - - for _, ev := range e.Events { - s += "\n" + ev.String() - } - - return s -} - -func (e *ProcessError) Error() string { - if e == nil { - return "" - } - - if e.Process == nil { - return "Unexpected nil process for error: " + e.Err.Error() - } - - s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID()) - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - switch e.Err.(type) { - case nil: - break - case syscall.Errno: - s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err)) - default: - s += fmt.Sprintf(": %s", e.Err.Error()) - } - - for _, ev := range e.Events { - s += "\n" + ev.String() - } - - return s -} - -// IsNotExist checks if an error is caused by the Container or Process not existing. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsNotExist(err error) bool { - if _, ok := err.(EndpointNotFoundError); ok { - return true - } - if _, ok := err.(NetworkNotFoundError); ok { - return true - } - return hcs.IsNotExist(getInnerError(err)) -} - -// IsAlreadyClosed checks if an error is caused by the Container or Process having been -// already closed by a call to the Close() method. -func IsAlreadyClosed(err error) bool { - return hcs.IsAlreadyClosed(getInnerError(err)) -} - -// IsPending returns a boolean indicating whether the error is that -// the requested operation is being completed in the background. 
-func IsPending(err error) bool { - return hcs.IsPending(getInnerError(err)) -} - -// IsTimeout returns a boolean indicating whether the error is caused by -// a timeout waiting for the operation to complete. -func IsTimeout(err error) bool { - return hcs.IsTimeout(getInnerError(err)) -} - -// IsAlreadyStopped returns a boolean indicating whether the error is caused by -// a Container or Process being already stopped. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsAlreadyStopped(err error) bool { - return hcs.IsAlreadyStopped(getInnerError(err)) -} - -// IsNotSupported returns a boolean indicating whether the error is caused by -// unsupported platform requests -// Note: Currently Unsupported platform requests can be mean either -// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage -// is thrown from the Platform -func IsNotSupported(err error) bool { - return hcs.IsNotSupported(getInnerError(err)) -} - -// IsOperationInvalidState returns true when err is caused by -// `ErrVmcomputeOperationInvalidState`. -func IsOperationInvalidState(err error) bool { - return hcs.IsOperationInvalidState(getInnerError(err)) -} - -// IsAccessIsDenied returns true when err is caused by -// `ErrVmcomputeOperationAccessIsDenied`. -func IsAccessIsDenied(err error) bool { - return hcs.IsAccessIsDenied(getInnerError(err)) -} - -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *ContainerError: - err = pe.Err - case *ProcessError: - err = pe.Err - } - return err -} - -func convertSystemError(err error, c *container) error { - if serr, ok := err.(*hcs.SystemError); ok { - return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} - } - return err -} - -func convertProcessError(err error, p *process) error { - if perr, ok := err.(*hcs.ProcessError); ok { - return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 deleted file mode 100644 index ce6edbcf..00000000 --- a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 +++ /dev/null @@ -1,12 +0,0 @@ -# Requirements so far: -# dockerd running -# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar -# - image alpine (linux) docker pull --platform=linux alpine - - -# TODO: Add this a parameter for debugging. ie "functional-tests -debug=$true" -#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please" - -#pushd uvm -go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./... -#popd \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go deleted file mode 100644 index ceb3ac85..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hcsshim.go +++ /dev/null @@ -1,28 +0,0 @@ -// Shim for the Host Compute Service (HCS) to manage Windows Server -// containers and Hyper-V containers. 
- -package hcsshim - -import ( - "syscall" - - "github.com/Microsoft/hcsshim/internal/hcserror" -) - -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go - -//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId - -const ( - // Specific user-visible exit codes - WaitErrExecFailed = 32767 - - ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE - ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) - WSAEINVAL = syscall.Errno(10022) - - // Timeout on wait calls - TimeoutInfinite = 0xFFFFFFFF -) - -type HcsError = hcserror.HcsError diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go deleted file mode 100644 index 9e005944..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ /dev/null @@ -1,118 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint = hns.HNSEndpoint - -// HNSEndpointStats represent the stats for an networkendpoint in HNS -type HNSEndpointStats = hns.EndpointStats - -// Namespace represents a Compartment. -type Namespace = hns.Namespace - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse = hns.EndpointResquestResponse - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - return hns.HNSEndpointRequest(method, path, request) -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, error) { - return hns.HNSListEndpointRequest() -} - -// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container -func HotAttachEndpoint(containerID string, endpointID string) error { - endpoint, err := GetHNSEndpointByID(endpointID) - if err != nil { - return err - } - isAttached, err := endpoint.IsAttached(containerID) - if isAttached { - return err - } - return modifyNetworkEndpoint(containerID, endpointID, Add) -} - -// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container -func HotDetachEndpoint(containerID string, endpointID string) error { - endpoint, err := GetHNSEndpointByID(endpointID) - if err != nil { - return err - } - isAttached, err := endpoint.IsAttached(containerID) - if !isAttached { - return err - } - return modifyNetworkEndpoint(containerID, endpointID, Remove) -} - -// ModifyContainer corresponding to the container id, by sending a request -func modifyContainer(id string, request *ResourceModificationRequestResponse) error { - container, err := OpenContainer(id) - if err != nil { - if IsNotExist(err) { - return ErrComputeSystemDoesNotExist - } - return getInnerError(err) - } - defer container.Close() - err = container.Modify(request) - if err != nil { - if IsNotSupported(err) { - return ErrPlatformNotSupported - } - return getInnerError(err) - } - - 
return nil -} - -func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error { - requestMessage := &ResourceModificationRequestResponse{ - Resource: Network, - Request: request, - Data: endpointID, - } - err := modifyContainer(containerID, requestMessage) - - if err != nil { - return err - } - - return nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return hns.GetHNSEndpointByID(endpointID) -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - return hns.GetHNSEndpointByName(endpointName) -} - -// GetHNSEndpointStats gets the endpoint stats by ID -func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) { - return hns.GetHNSEndpointStats(endpointName) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go deleted file mode 100644 index 2b538190..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go +++ /dev/null @@ -1,16 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -type HNSGlobals = hns.HNSGlobals -type HNSVersion = hns.HNSVersion - -var ( - HNSVersion1803 = hns.HNSVersion1803 -) - -func GetHNSGlobals() (*HNSGlobals, error) { - return hns.GetHNSGlobals() -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go deleted file mode 100644 index f775fa1d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// Subnet is assoicated with a network and represents a list -// of subnets available to the network -type Subnet = hns.Subnet - -// MacPool is assoicated with a network and represents a list -// of macaddresses available to the network -type MacPool = hns.MacPool - -// HNSNetwork represents a network in HNS -type HNSNetwork = hns.HNSNetwork - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - return hns.HNSNetworkRequest(method, path, request) -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - return hns.HNSListNetworkRequest(method, path, request) -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return hns.GetHNSNetworkByID(networkID) -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - return hns.GetHNSNetworkByName(networkName) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go deleted file mode 100644 index 00ab2636..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go +++ /dev/null @@ -1,60 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// Type of Request Support in ModifySystem -type PolicyType = hns.PolicyType - -// RequestType const -const ( - Nat = hns.Nat - ACL = hns.ACL - PA = hns.PA - VLAN = hns.VLAN - VSID = hns.VSID - VNet = hns.VNet - L2Driver = hns.L2Driver - Isolation = hns.Isolation - QOS = hns.QOS - OutboundNat = hns.OutboundNat - ExternalLoadBalancer = hns.ExternalLoadBalancer - Route = hns.Route - Proxy = hns.Proxy -) - 
-type ProxyPolicy = hns.ProxyPolicy - -type NatPolicy = hns.NatPolicy - -type QosPolicy = hns.QosPolicy - -type IsolationPolicy = hns.IsolationPolicy - -type VlanPolicy = hns.VlanPolicy - -type VsidPolicy = hns.VsidPolicy - -type PaPolicy = hns.PaPolicy - -type OutboundNatPolicy = hns.OutboundNatPolicy - -type ActionType = hns.ActionType -type DirectionType = hns.DirectionType -type RuleType = hns.RuleType - -const ( - Allow = hns.Allow - Block = hns.Block - - In = hns.In - Out = hns.Out - - Host = hns.Host - Switch = hns.Switch -) - -type ACLPolicy = hns.ACLPolicy - -type Policy = hns.Policy diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go deleted file mode 100644 index 55aaa4a5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go +++ /dev/null @@ -1,47 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy = hns.RoutePolicy - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy = hns.ELBPolicy - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy = hns.LBPolicy - -// PolicyList is a structure defining schema for Policy list request -type PolicyList = hns.PolicyList - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - return hns.HNSPolicyListRequest(method, path, request) -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - return hns.HNSListPolicyListRequest() -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) (*PolicyList, error) { - return hns.PolicyListRequest(method, path, request) -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, error) { - return hns.GetPolicyListByID(policyListID) -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) -} - -// AddRoute adds route policy list for the specified endpoints -func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { - return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled) -} diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go deleted file mode 100644 index 69405244..00000000 --- a/vendor/github.com/Microsoft/hcsshim/hnssupport.go +++ /dev/null @@ -1,13 +0,0 @@ -package hcsshim - -import ( - "github.com/Microsoft/hcsshim/internal/hns" -) - -type HNSSupportedFeatures = hns.HNSSupportedFeatures - -type HNSAclFeatures = hns.HNSAclFeatures - -func GetHNSSupportedFeatures() HNSSupportedFeatures { - return hns.GetHNSSupportedFeatures() -} diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go deleted file mode 100644 index 300eb599..00000000 --- a/vendor/github.com/Microsoft/hcsshim/interface.go +++ /dev/null @@ -1,114 +0,0 @@ -package hcsshim - -import ( - "io" - "time" - 
- "github.com/Microsoft/hcsshim/internal/hcs/schema1" -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig = schema1.ProcessConfig - -type Layer = schema1.Layer -type MappedDir = schema1.MappedDir -type MappedPipe = schema1.MappedPipe -type HvRuntime = schema1.HvRuntime -type MappedVirtualDisk = schema1.MappedVirtualDisk - -// AssignedDevice represents a device that has been directly assigned to a container -// -// NOTE: Support added in RS5 -type AssignedDevice = schema1.AssignedDevice - -// ContainerConfig is used as both the input of CreateContainer -// and to convert the parameters to JSON for passing onto the HCS -type ContainerConfig = schema1.ContainerConfig - -type ComputeSystemQuery = schema1.ComputeSystemQuery - -// Container represents a created (but not necessarily running) container. -type Container interface { - // Start synchronously starts the container. - Start() error - - // Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds. - Shutdown() error - - // Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. - Terminate() error - - // Waits synchronously waits for the container to shutdown or terminate. - Wait() error - - // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It - // returns false if timeout occurs. - WaitTimeout(time.Duration) error - - // Pause pauses the execution of a container. - Pause() error - - // Resume resumes the execution of a container. - Resume() error - - // HasPendingUpdates returns true if the container has updates pending to install. - HasPendingUpdates() (bool, error) - - // Statistics returns statistics for a container. - Statistics() (Statistics, error) - - // ProcessList returns details for the processes in a container. - ProcessList() ([]ProcessListItem, error) - - // MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller - MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) - - // CreateProcess launches a new process within the container. - CreateProcess(c *ProcessConfig) (Process, error) - - // OpenProcess gets an interface to an existing process within the container. - OpenProcess(pid int) (Process, error) - - // Close cleans up any state associated with the container but does not terminate or wait for it. - Close() error - - // Modify the System - Modify(config *ResourceModificationRequestResponse) error -} - -// Process represents a running or exited process. -type Process interface { - // Pid returns the process ID of the process within the container. - Pid() int - - // Kill signals the process to terminate but does not wait for it to finish terminating. - Kill() error - - // Wait waits for the process to exit. - Wait() error - - // WaitTimeout waits for the process to exit or the duration to elapse. It returns - // false if timeout occurs. - WaitTimeout(time.Duration) error - - // ExitCode returns the exit code of the process. The process must have - // already terminated. - ExitCode() (int, error) - - // ResizeConsole resizes the console of the process. - ResizeConsole(width, height uint16) error - - // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing - // these pipes does not close the underlying pipes; it should be possible to - // call this multiple times to get multiple interfaces. 
- Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) - - // CloseStdin closes the write side of the stdin pipe so that the process is - // notified on the read side that there is no more data in stdin. - CloseStdin() error - - // Close cleans up any state associated with the process but does not kill - // or wait on it. - Close() error -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go deleted file mode 100644 index f46af33b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ /dev/null @@ -1,97 +0,0 @@ -package cow - -import ( - "context" - "io" - - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// Process is the interface for an OS process running in a container or utility VM. -type Process interface { - // Close releases resources associated with the process and closes the - // writer and readers returned by Stdio. Depending on the implementation, - // this may also terminate the process. - Close() error - // CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever - // is appropriate to indicate that no more data is available. - CloseStdin(ctx context.Context) error - // CloseStdout closes the stdout connection to the process. It is used to indicate - // that we are done receiving output on the shim side. - CloseStdout(ctx context.Context) error - // CloseStderr closes the stderr connection to the process. It is used to indicate - // that we are done receiving output on the shim side. - CloseStderr(ctx context.Context) error - // Pid returns the process ID. - Pid() int - // Stdio returns the stdio streams for a process. These may be nil if a stream - // was not requested during CreateProcess. - Stdio() (_ io.Writer, _ io.Reader, _ io.Reader) - // ResizeConsole resizes the virtual terminal associated with the process. - ResizeConsole(ctx context.Context, width, height uint16) error - // Kill sends a SIGKILL or equivalent signal to the process and returns whether - // the signal was delivered. It does not wait for the process to terminate. - Kill(ctx context.Context) (bool, error) - // Signal sends a signal to the process and returns whether the signal was - // delivered. The input is OS specific (either - // guestrequest.SignalProcessOptionsWCOW or - // guestrequest.SignalProcessOptionsLCOW). It does not wait for the process - // to terminate. - Signal(ctx context.Context, options interface{}) (bool, error) - // Wait waits for the process to complete, or for a connection to the process to be - // terminated by some error condition (including calling Close). - Wait() error - // ExitCode returns the exit code of the process. Returns an error if the process is - // not running. - ExitCode() (int, error) -} - -// ProcessHost is the interface for creating processes. -type ProcessHost interface { - // CreateProcess creates a process. The configuration is host specific - // (either hcsschema.ProcessParameters or lcow.ProcessParameters). - CreateProcess(ctx context.Context, config interface{}) (Process, error) - // OS returns the host's operating system, "linux" or "windows". - OS() string - // IsOCI specifies whether this is an OCI-compliant process host. If true, - // then the configuration passed to CreateProcess should have an OCI process - // spec (or nil if this is the initial process in an OCI container). - // Otherwise, it should have the HCS-specific process parameters. 
- IsOCI() bool -} - -// Container is the interface for container objects, either running on the host or -// in a utility VM. -type Container interface { - ProcessHost - // Close releases the resources associated with the container. Depending on - // the implementation, this may also terminate the container. - Close() error - // ID returns the container ID. - ID() string - // Properties returns the requested container properties targeting a V1 schema container. - Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) - // PropertiesV2 returns the requested container properties targeting a V2 schema container. - PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) - // Start starts a container. - Start(ctx context.Context) error - // Shutdown sends a shutdown request to the container (but does not wait for - // the shutdown to complete). - Shutdown(ctx context.Context) error - // Terminate sends a terminate request to the container (but does not wait - // for the terminate to complete). - Terminate(ctx context.Context) error - // Wait waits for the container to terminate, or for the connection to the - // container to be terminated by some error condition (including calling - // Close). - Wait() error - // WaitChannel returns the wait channel of the container - WaitChannel() <-chan struct{} - // WaitError returns the container termination error. - // This function should only be called after the channel in WaitChannel() - // is closed. Otherwise it is not thread safe. - WaitError() error - // Modify sends a request to modify container resources - Modify(ctx context.Context, config interface{}) error -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go deleted file mode 100644 index d13772b0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ /dev/null @@ -1,161 +0,0 @@ -package hcs - -import ( - "fmt" - "sync" - "syscall" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "github.com/sirupsen/logrus" -) - -var ( - nextCallback uintptr - callbackMap = map[uintptr]*notificationWatcherContext{} - callbackMapLock = sync.RWMutex{} - - notificationWatcherCallback = syscall.NewCallback(notificationWatcher) - - // Notifications for HCS_SYSTEM handles - hcsNotificationSystemExited hcsNotification = 0x00000001 - hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 - hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 - hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 - hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 - hcsNotificationSystemCrashReport hcsNotification = 0x00000006 - hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007 - hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008 - hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009 - hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A - hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B - hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C - hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D - hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E - - // Notifications for HCS_PROCESS handles - hcsNotificationProcessExited hcsNotification = 0x00010000 
- - // Common notifications - hcsNotificationInvalid hcsNotification = 0x00000000 - hcsNotificationServiceDisconnect hcsNotification = 0x01000000 -) - -type hcsNotification uint32 - -func (hn hcsNotification) String() string { - switch hn { - case hcsNotificationSystemExited: - return "SystemExited" - case hcsNotificationSystemCreateCompleted: - return "SystemCreateCompleted" - case hcsNotificationSystemStartCompleted: - return "SystemStartCompleted" - case hcsNotificationSystemPauseCompleted: - return "SystemPauseCompleted" - case hcsNotificationSystemResumeCompleted: - return "SystemResumeCompleted" - case hcsNotificationSystemCrashReport: - return "SystemCrashReport" - case hcsNotificationSystemSiloJobCreated: - return "SystemSiloJobCreated" - case hcsNotificationSystemSaveCompleted: - return "SystemSaveCompleted" - case hcsNotificationSystemRdpEnhancedModeStateChanged: - return "SystemRdpEnhancedModeStateChanged" - case hcsNotificationSystemShutdownFailed: - return "SystemShutdownFailed" - case hcsNotificationSystemGetPropertiesCompleted: - return "SystemGetPropertiesCompleted" - case hcsNotificationSystemModifyCompleted: - return "SystemModifyCompleted" - case hcsNotificationSystemCrashInitiated: - return "SystemCrashInitiated" - case hcsNotificationSystemGuestConnectionClosed: - return "SystemGuestConnectionClosed" - case hcsNotificationProcessExited: - return "ProcessExited" - case hcsNotificationInvalid: - return "Invalid" - case hcsNotificationServiceDisconnect: - return "ServiceDisconnect" - default: - return fmt.Sprintf("Unknown: %d", hn) - } -} - -type notificationChannel chan error - -type notificationWatcherContext struct { - channels notificationChannels - handle vmcompute.HcsCallback - - systemID string - processID int -} - -type notificationChannels map[hcsNotification]notificationChannel - -func newSystemChannels() notificationChannels { - channels := make(notificationChannels) - for _, notif := range []hcsNotification{ - hcsNotificationServiceDisconnect, - hcsNotificationSystemExited, - hcsNotificationSystemCreateCompleted, - hcsNotificationSystemStartCompleted, - hcsNotificationSystemPauseCompleted, - hcsNotificationSystemResumeCompleted, - hcsNotificationSystemSaveCompleted, - } { - channels[notif] = make(notificationChannel, 1) - } - return channels -} - -func newProcessChannels() notificationChannels { - channels := make(notificationChannels) - for _, notif := range []hcsNotification{ - hcsNotificationServiceDisconnect, - hcsNotificationProcessExited, - } { - channels[notif] = make(notificationChannel, 1) - } - return channels -} - -func closeChannels(channels notificationChannels) { - for _, c := range channels { - close(c) - } -} - -func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { - var result error - if int32(notificationStatus) < 0 { - result = interop.Win32FromHresult(notificationStatus) - } - - callbackMapLock.RLock() - context := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if context == nil { - return 0 - } - - log := logrus.WithFields(logrus.Fields{ - "notification-type": notificationType.String(), - "system-id": context.systemID, - }) - if context.processID != 0 { - log.Data[logfields.ProcessID] = context.processID - } - log.Debug("HCS notification") - - if channel, ok := context.channels[notificationType]; ok { - channel <- result - } - - return 0 -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go deleted file mode 100644 index 295d4b84..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ /dev/null @@ -1,343 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net" - "syscall" - - "github.com/Microsoft/hcsshim/internal/log" -) - -var ( - // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists - ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrElementNotFound = syscall.Errno(0x490) - - // ErrElementNotFound is an error encountered when the object being referenced does not exist - ErrNotSupported = syscall.Errno(0x32) - - // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported - // decimal -2147024883 / hex 0x8007000d - ErrInvalidData = syscall.Errno(0xd) - - // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed - ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") - - // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method - ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") - - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = errors.New("hcsshim: timeout waiting for notification") - - // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = errors.New("unexpected container exit") - - // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") - - // ErrUnexpectedValue is an error encountered when hcs returns an invalid value - ErrUnexpectedValue = errors.New("unexpected value returned from hcs") - - // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container - ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) - - // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously - ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) - - // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation - ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) - - // ErrProcNotFound is an error encountered when a procedure look up fails. - ErrProcNotFound = syscall.Errno(0x7f) - - // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 - // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. 
- ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5) - - // ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management - ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d) - - // ErrVmcomputeUnknownMessage is an error encountered guest compute system doesn't support the message - ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b) - - // ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly - ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106) - - // ErrNotSupported is an error encountered when hcs doesn't support the request - ErrPlatformNotSupported = errors.New("unsupported platform request") - - // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. - ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) - - // ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that - // compute system has already been closed. - ErrInvalidHandle = syscall.Errno(0x6) -) - -type ErrorEvent struct { - Message string `json:"Message,omitempty"` // Fully formated error message - StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form - Provider string `json:"Provider,omitempty"` - EventID uint16 `json:"EventId,omitempty"` - Flags uint32 `json:"Flags,omitempty"` - Source string `json:"Source,omitempty"` - //Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function) -} - -type hcsResult struct { - Error int32 - ErrorMessage string - ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"` -} - -func (ev *ErrorEvent) String() string { - evs := "[Event Detail: " + ev.Message - if ev.StackTrace != "" { - evs += " Stack Trace: " + ev.StackTrace - } - if ev.Provider != "" { - evs += " Provider: " + ev.Provider - } - if ev.EventID != 0 { - evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID) - } - if ev.Flags != 0 { - evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags) - } - if ev.Source != "" { - evs += " Source: " + ev.Source - } - evs += "]" - return evs -} - -func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent { - if resultJSON != "" { - result := &hcsResult{} - if err := json.Unmarshal([]byte(resultJSON), result); err != nil { - log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result") - return nil - } - return result.ErrorEvents - } - return nil -} - -type HcsError struct { - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &HcsError{} - -func (e *HcsError) Error() string { - s := e.Op + ": " + e.Err.Error() - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *HcsError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() //nolint:staticcheck -} - -func (e *HcsError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - SystemID string - Pid int - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &ProcessError{} - -// SystemError is an error encountered in HCS during an operation on a Container object -type SystemError struct { - ID string - Op string - Err error - Events []ErrorEvent -} - -var _ net.Error = &SystemError{} - -func (e 
*SystemError) Error() string { - s := e.Op + " " + e.ID + ": " + e.Err.Error() - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *SystemError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() //nolint:staticcheck -} - -func (e *SystemError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { - // Don't double wrap errors - if _, ok := err.(*SystemError); ok { - return err - } - return &SystemError{ - ID: system.ID(), - Op: op, - Err: err, - Events: events, - } -} - -func (e *ProcessError) Error() string { - s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) - for _, ev := range e.Events { - s += "\n" + ev.String() - } - return s -} - -func (e *ProcessError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() //nolint:staticcheck -} - -func (e *ProcessError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - -func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { - // Don't double wrap errors - if _, ok := err.(*ProcessError); ok { - return err - } - return &ProcessError{ - Pid: process.Pid(), - SystemID: process.SystemID(), - Op: op, - Err: err, - Events: events, - } -} - -// IsNotExist checks if an error is caused by the Container or Process not existing. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. -func IsNotExist(err error) bool { - err = getInnerError(err) - return err == ErrComputeSystemDoesNotExist || - err == ErrElementNotFound -} - -// IsErrorInvalidHandle checks whether the error is the result of an operation carried -// out on a handle that is invalid/closed. This error popped up while trying to query -// stats on a container in the process of being stopped. -func IsErrorInvalidHandle(err error) bool { - err = getInnerError(err) - return err == ErrInvalidHandle -} - -// IsAlreadyClosed checks if an error is caused by the Container or Process having been -// already closed by a call to the Close() method. -func IsAlreadyClosed(err error) bool { - err = getInnerError(err) - return err == ErrAlreadyClosed -} - -// IsPending returns a boolean indicating whether the error is that -// the requested operation is being completed in the background. -func IsPending(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationPending -} - -// IsTimeout returns a boolean indicating whether the error is caused by -// a timeout waiting for the operation to complete. -func IsTimeout(err error) bool { - if err, ok := err.(net.Error); ok && err.Timeout() { - return true - } - err = getInnerError(err) - return err == ErrTimeout -} - -// IsAlreadyStopped returns a boolean indicating whether the error is caused by -// a Container or Process being already stopped. -// Note: Currently, ErrElementNotFound can mean that a Process has either -// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist -// will currently return true when the error is ErrElementNotFound. 
-func IsAlreadyStopped(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeAlreadyStopped || - err == ErrProcessAlreadyStopped || - err == ErrElementNotFound -} - -// IsNotSupported returns a boolean indicating whether the error is caused by -// unsupported platform requests -// Note: Currently Unsupported platform requests can be mean either -// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage -// is thrown from the Platform -func IsNotSupported(err error) bool { - err = getInnerError(err) - // If Platform doesn't recognize or support the request sent, below errors are seen - return err == ErrVmcomputeInvalidJSON || - err == ErrInvalidData || - err == ErrNotSupported || - err == ErrVmcomputeUnknownMessage -} - -// IsOperationInvalidState returns true when err is caused by -// `ErrVmcomputeOperationInvalidState`. -func IsOperationInvalidState(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationInvalidState -} - -// IsAccessIsDenied returns true when err is caused by -// `ErrVmcomputeOperationAccessIsDenied`. -func IsAccessIsDenied(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationAccessIsDenied -} - -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *HcsError: - err = pe.Err - case *SystemError: - err = pe.Err - case *ProcessError: - err = pe.Err - } - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go deleted file mode 100644 index 78490d6c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ /dev/null @@ -1,589 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - "errors" - "io" - "os" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "go.opencensus.io/trace" -) - -// ContainerError is an error encountered in HCS -type Process struct { - handleLock sync.RWMutex - handle vmcompute.HcsProcess - processID int - system *System - hasCachedStdio bool - stdioLock sync.Mutex - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - callbackNumber uintptr - killSignalDelivered bool - - closedWaitOnce sync.Once - waitBlock chan struct{} - exitCode int - waitError error -} - -func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { - return &Process{ - handle: process, - processID: processID, - system: computeSystem, - waitBlock: make(chan struct{}), - } -} - -type processModifyRequest struct { - Operation string - ConsoleSize *consoleSize `json:",omitempty"` - CloseHandle *closeHandle `json:",omitempty"` -} - -type consoleSize struct { - Height uint16 - Width uint16 -} - -type closeHandle struct { - Handle string -} - -type processStatus struct { - ProcessID uint32 - Exited bool - ExitCode uint32 - LastWaitResult int32 -} - -const stdIn string = "StdIn" - -const ( - modifyConsoleSize string = "ConsoleSize" - modifyCloseHandle string = "CloseHandle" -) - -// Pid returns the process ID of the process within the container. -func (process *Process) Pid() int { - return process.processID -} - -// SystemID returns the ID of the process's compute system. 
-func (process *Process) SystemID() string { - return process.system.ID() -} - -func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { - case nil: - return true, nil - case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: - select { - case <-process.waitBlock: - // The process exit notification has already arrived. - default: - // The process should be gone, but we have not received the notification. - // After a second, force unblock the process wait to work around a possible - // deadlock in the HCS. - go func() { - time.Sleep(time.Second) - process.closedWaitOnce.Do(func() { - log.G(ctx).WithError(err).Warn("force unblocking process waits") - process.exitCode = -1 - process.waitError = err - close(process.waitBlock) - }) - }() - } - return false, nil - default: - return false, err - } -} - -// Signal signals the process with `options`. -// -// For LCOW `guestrequest.SignalProcessOptionsLCOW`. -// -// For WCOW `guestrequest.SignalProcessOptionsWCOW`. -func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::Signal" - - if process.handle == 0 { - return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - optionsb, err := json.Marshal(options) - if err != nil { - return false, err - } - - resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb)) - events := processHcsResult(ctx, resultJSON) - delivered, err := process.processSignalResult(ctx, err) - if err != nil { - err = makeProcessError(process, operation, err, events) - } - return delivered, err -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *Process) Kill(ctx context.Context) (bool, error) { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::Kill" - - if process.handle == 0 { - return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - if process.killSignalDelivered { - // A kill signal has already been sent to this process. Sending a second - // one offers no real benefit, as processes cannot stop themselves from - // being terminated, once a TerminateProcess has been issued. Sending a - // second kill may result in a number of errors (two of which detailed bellow) - // and which we can avoid handling. - return true, nil - } - - // HCS serializes the signals sent to a target pid per compute system handle. - // To avoid SIGKILL being serialized behind other signals, we open a new compute - // system handle to deliver the kill signal. 
- // If the calls to opening a new compute system handle fail, we forcefully - // terminate the container itself so that no container is left behind - hcsSystem, err := OpenComputeSystem(ctx, process.system.id) - if err != nil { - // log error and force termination of container - log.G(ctx).WithField("err", err).Error("OpenComputeSystem() call failed") - err = process.system.Terminate(ctx) - // if the Terminate() call itself ever failed, log and return error - if err != nil { - log.G(ctx).WithField("err", err).Error("Terminate() call failed") - return false, err - } - process.system.Close() - return true, nil - } - defer hcsSystem.Close() - - newProcessHandle, err := hcsSystem.OpenProcess(ctx, process.Pid()) - if err != nil { - // Return true only if the target process has either already - // exited, or does not exist. - if IsAlreadyStopped(err) { - return true, nil - } else { - return false, err - } - } - defer newProcessHandle.Close() - - resultJSON, err := vmcompute.HcsTerminateProcess(ctx, newProcessHandle.handle) - if err != nil { - // We still need to check these two cases, as processes may still be killed by an - // external actor (human operator, OOM, random script etc). - if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { - // There are two cases where it should be safe to ignore an error returned - // by HcsTerminateProcess. The first one is cause by the fact that - // HcsTerminateProcess ends up calling TerminateProcess in the context - // of a container. According to the TerminateProcess documentation: - // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks - // After a process has terminated, call to TerminateProcess with open - // handles to the process fails with ERROR_ACCESS_DENIED (5) error code. - // It's safe to ignore this error here. HCS should always have permissions - // to kill processes inside any container. So an ERROR_ACCESS_DENIED - // is unlikely to be anything else than what the ending remarks in the - // documentation states. - // - // The second case is generated by hcs itself, if for any reason HcsTerminateProcess - // is called twice in a very short amount of time. In such cases, hcs may return - // HCS_E_PROCESS_ALREADY_STOPPED. - return true, nil - } - } - events := processHcsResult(ctx, resultJSON) - delivered, err := newProcessHandle.processSignalResult(ctx, err) - if err != nil { - err = makeProcessError(newProcessHandle, operation, err, events) - } - - process.killSignalDelivered = delivered - return delivered, err -} - -// waitBackground waits for the process exit notification. Once received sets -// `process.waitError` (if any) and unblocks all `Wait` calls. -// -// This MUST be called exactly once per `process.handle` but `Wait` is safe to -// call multiple times. 
-func (process *Process) waitBackground() { - operation := "hcs::Process::waitBackground" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - var ( - err error - exitCode = -1 - propertiesJSON string - resultJSON string - ) - - err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil) - if err != nil { - err = makeProcessError(process, operation, err, nil) - log.G(ctx).WithError(err).Error("failed wait") - } else { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - // Make sure we didnt race with Close() here - if process.handle != 0 { - propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) - events := processHcsResult(ctx, resultJSON) - if err != nil { - err = makeProcessError(process, operation, err, events) //nolint:ineffassign - } else { - properties := &processStatus{} - err = json.Unmarshal([]byte(propertiesJSON), properties) - if err != nil { - err = makeProcessError(process, operation, err, nil) //nolint:ineffassign - } else { - if properties.LastWaitResult != 0 { - log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") - } else { - exitCode = int(properties.ExitCode) - } - } - } - } - } - log.G(ctx).WithField("exitCode", exitCode).Debug("process exited") - - process.closedWaitOnce.Do(func() { - process.exitCode = exitCode - process.waitError = err - close(process.waitBlock) - }) - oc.SetSpanStatus(span, err) -} - -// Wait waits for the process to exit. If the process has already exited returns -// the pervious error (if any). -func (process *Process) Wait() error { - <-process.waitBlock - return process.waitError -} - -// ResizeConsole resizes the console of the process. -func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::ResizeConsole" - - if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - modifyRequest := processModifyRequest{ - Operation: modifyConsoleSize, - ConsoleSize: &consoleSize{ - Height: height, - Width: width, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } - - return nil -} - -// ExitCode returns the exit code of the process. The process must have -// already terminated. -func (process *Process) ExitCode() (int, error) { - select { - case <-process.waitBlock: - if process.waitError != nil { - return -1, process.waitError - } - return process.exitCode, nil - default: - return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) - } -} - -// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes. Once returned, these pipes -// are the responsibility of the caller to close. 
-func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { - operation := "hcs::Process::StdioLegacy" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - if process.handle == 0 { - return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.hasCachedStdio { - stdin, stdout, stderr := process.stdin, process.stdout, process.stderr - process.stdin, process.stdout, process.stderr = nil, nil, nil - process.hasCachedStdio = false - return stdin, stdout, stderr, nil - } - - processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, events) - } - - pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, err, nil) - } - - return pipes[0], pipes[1], pipes[2], nil -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. -// To close them, close the process handle. -func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - return process.stdin, process.stdout, process.stderr -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. 
-func (process *Process) CloseStdin(ctx context.Context) error { - process.handleLock.RLock() - defer process.handleLock.RUnlock() - - operation := "hcs::Process::CloseStdin" - - if process.handle == 0 { - return makeProcessError(process, operation, ErrAlreadyClosed, nil) - } - - modifyRequest := processModifyRequest{ - Operation: modifyCloseHandle, - CloseHandle: &closeHandle{ - Handle: stdIn, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } - - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil - } - process.stdioLock.Unlock() - - return nil -} - -func (process *Process) CloseStdout(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stdout != nil { - process.stdout.Close() - process.stdout = nil - } - return nil -} - -func (process *Process) CloseStderr(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - defer process.stdioLock.Unlock() - if process.stderr != nil { - process.stderr.Close() - process.stderr = nil - - } - return nil -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. 
-func (process *Process) Close() (err error) { - operation := "hcs::Process::Close" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("cid", process.SystemID()), - trace.Int64Attribute("pid", int64(process.processID))) - - process.handleLock.Lock() - defer process.handleLock.Unlock() - - // Don't double free this - if process.handle == 0 { - return nil - } - - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil - } - if process.stdout != nil { - process.stdout.Close() - process.stdout = nil - } - if process.stderr != nil { - process.stderr.Close() - process.stderr = nil - } - process.stdioLock.Unlock() - - if err = process.unregisterCallback(ctx); err != nil { - return makeProcessError(process, operation, err, nil) - } - - if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { - return makeProcessError(process, operation, err, nil) - } - - process.handle = 0 - process.closedWaitOnce.Do(func() { - process.exitCode = -1 - process.waitError = ErrAlreadyClosed - close(process.waitBlock) - }) - - return nil -} - -func (process *Process) registerCallback(ctx context.Context) error { - callbackContext := ¬ificationWatcherContext{ - channels: newProcessChannels(), - systemID: process.SystemID(), - processID: process.processID, - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = callbackContext - callbackMapLock.Unlock() - - callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) - if err != nil { - return err - } - callbackContext.handle = callbackHandle - process.callbackNumber = callbackNumber - - return nil -} - -func (process *Process) unregisterCallback(ctx context.Context) error { - callbackNumber := process.callbackNumber - - callbackMapLock.RLock() - callbackContext := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if callbackContext == nil { - return nil - } - - handle := callbackContext.handle - - if handle == 0 { - return nil - } - - // vmcompute.HcsUnregisterProcessCallback has its own synchronization to - // wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
- err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) - if err != nil { - return err - } - - closeChannels(callbackContext.channels) - - callbackMapLock.Lock() - delete(callbackMap, callbackNumber) - callbackMapLock.Unlock() - - handle = 0 //nolint:ineffassign - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go deleted file mode 100644 index b621c559..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go +++ /dev/null @@ -1,250 +0,0 @@ -package schema1 - -import ( - "encoding/json" - "time" - - "github.com/Microsoft/go-winio/pkg/guid" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig struct { - ApplicationName string `json:",omitempty"` - CommandLine string `json:",omitempty"` - CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows - User string `json:",omitempty"` - WorkingDirectory string `json:",omitempty"` - Environment map[string]string `json:",omitempty"` - EmulateConsole bool `json:",omitempty"` - CreateStdInPipe bool `json:",omitempty"` - CreateStdOutPipe bool `json:",omitempty"` - CreateStdErrPipe bool `json:",omitempty"` - ConsoleSize [2]uint `json:",omitempty"` - CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows - OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows -} - -type Layer struct { - ID string - Path string -} - -type MappedDir struct { - HostPath string - ContainerPath string - ReadOnly bool - BandwidthMaximum uint64 - IOPSMaximum uint64 - CreateInUtilityVM bool - // LinuxMetadata - Support added in 1803/RS4+. - LinuxMetadata bool `json:",omitempty"` -} - -type MappedPipe struct { - HostPath string - ContainerPipeName string -} - -type HvRuntime struct { - ImagePath string `json:",omitempty"` - SkipTemplate bool `json:",omitempty"` - LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM - LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM - LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode - BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD - WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD -} - -type MappedVirtualDisk struct { - HostPath string `json:",omitempty"` // Path to VHD on the host - ContainerPath string // Platform-specific mount point path in the container - CreateInUtilityVM bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" - AttachOnly bool `json:",omitempty"` -} - -// AssignedDevice represents a device that has been directly assigned to a container -// -// NOTE: Support added in RS5 -type AssignedDevice struct { - // InterfaceClassGUID of the device to assign to container. 
- InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` -} - -// ContainerConfig is used as both the input of CreateContainer -// and to convert the parameters to JSON for passing onto the HCS -type ContainerConfig struct { - SystemType string // HCS requires this to be hard-coded to "Container" - Name string // Name of the container. We use the docker ID. - Owner string `json:",omitempty"` // The management platform that created this container - VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} - IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows - LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID - Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID - Credentials string `json:",omitempty"` // Credentials information - ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. - ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. - ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. - StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS - StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second - StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller - MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes - HostName string `json:",omitempty"` // Hostname - MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) - MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes - HvPartition bool // True if it a Hyper-V Container - NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. - EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container - HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM - Servicing bool `json:",omitempty"` // True if this container is for servicing - AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution - DNSSearchList string `json:",omitempty"` // Comma seperated list of DNS suffixes to use for name resolution - ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. - TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed - MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start - AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. 
NOTE: Support added in RS5 -} - -type ComputeSystemQuery struct { - IDs []string `json:"Ids,omitempty"` - Types []string `json:",omitempty"` - Names []string `json:",omitempty"` - Owners []string `json:",omitempty"` -} - -type PropertyType string - -const ( - PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 - PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 - PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call - PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 -) - -type PropertyQuery struct { - PropertyTypes []PropertyType `json:",omitempty"` -} - -// ContainerProperties holds the properties for a container and the processes running in that container -type ContainerProperties struct { - ID string `json:"Id"` - State string - Name string - SystemType string - RuntimeOSType string `json:"RuntimeOsType,omitempty"` - Owner string - SiloGUID string `json:"SiloGuid,omitempty"` - RuntimeID guid.GUID `json:"RuntimeId,omitempty"` - IsRuntimeTemplate bool `json:",omitempty"` - RuntimeImagePath string `json:",omitempty"` - Stopped bool `json:",omitempty"` - ExitType string `json:",omitempty"` - AreUpdatesPending bool `json:",omitempty"` - ObRoot string `json:",omitempty"` - Statistics Statistics `json:",omitempty"` - ProcessList []ProcessListItem `json:",omitempty"` - MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` - GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` -} - -// MemoryStats holds the memory statistics for a container -type MemoryStats struct { - UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` - UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` - UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` -} - -// ProcessorStats holds the processor statistics for a container -type ProcessorStats struct { - TotalRuntime100ns uint64 `json:",omitempty"` - RuntimeUser100ns uint64 `json:",omitempty"` - RuntimeKernel100ns uint64 `json:",omitempty"` -} - -// StorageStats holds the storage statistics for a container -type StorageStats struct { - ReadCountNormalized uint64 `json:",omitempty"` - ReadSizeBytes uint64 `json:",omitempty"` - WriteCountNormalized uint64 `json:",omitempty"` - WriteSizeBytes uint64 `json:",omitempty"` -} - -// NetworkStats holds the network statistics for a container -type NetworkStats struct { - BytesReceived uint64 `json:",omitempty"` - BytesSent uint64 `json:",omitempty"` - PacketsReceived uint64 `json:",omitempty"` - PacketsSent uint64 `json:",omitempty"` - DroppedPacketsIncoming uint64 `json:",omitempty"` - DroppedPacketsOutgoing uint64 `json:",omitempty"` - EndpointId string `json:",omitempty"` - InstanceId string `json:",omitempty"` -} - -// Statistics is the structure returned by a statistics call on a container -type Statistics struct { - Timestamp time.Time `json:",omitempty"` - ContainerStartTime time.Time `json:",omitempty"` - Uptime100ns uint64 `json:",omitempty"` - Memory MemoryStats `json:",omitempty"` - Processor ProcessorStats `json:",omitempty"` - Storage StorageStats `json:",omitempty"` - Network []NetworkStats `json:",omitempty"` -} - -// ProcessList is the structure of an item returned by a ProcessList call on a container -type ProcessListItem struct { - CreateTimestamp time.Time `json:",omitempty"` - ImageName string `json:",omitempty"` - KernelTime100ns uint64 `json:",omitempty"` - MemoryCommitBytes 
uint64 `json:",omitempty"` - MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` - MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` - ProcessId uint32 `json:",omitempty"` - UserTime100ns uint64 `json:",omitempty"` -} - -// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container -type MappedVirtualDiskController struct { - MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` -} - -// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM -type GuestDefinedCapabilities struct { - NamespaceAddRequestSupported bool `json:",omitempty"` - SignalProcessSupported bool `json:",omitempty"` - DumpStacksSupported bool `json:",omitempty"` - DeleteContainerStateSupported bool `json:",omitempty"` - UpdateContainerSupported bool `json:",omitempty"` -} - -// GuestConnectionInfo is the structure of an iterm return by a GuestConnection call on a utility VM -type GuestConnectionInfo struct { - SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` - ProtocolVersion uint32 `json:",omitempty"` - GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` -} - -// Type of Request Support in ModifySystem -type RequestType string - -// Type of Resource Support in ModifySystem -type ResourceType string - -// RequestType const -const ( - Add RequestType = "Add" - Remove RequestType = "Remove" - Network ResourceType = "Network" -) - -// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type ResourceModificationRequestResponse struct { - Resource ResourceType `json:"ResourceType"` - Data interface{} `json:"Settings"` - Request RequestType `json:"RequestType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go deleted file mode 100644 index 70884aad..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Attachment struct { - Type_ string `json:"Type,omitempty"` - - Path string `json:"Path,omitempty"` - - IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` - - CachingMode string `json:"CachingMode,omitempty"` - - NoWriteHardening bool `json:"NoWriteHardening,omitempty"` - - DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` - - IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` - - CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` - - SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` - - AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` - - ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go deleted file mode 100644 index ecbbed4c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - 
* HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Battery struct { -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go deleted file mode 100644 index c1ea3953..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CacheQueryStatsResponse struct { - L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` - - L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` - - L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go deleted file mode 100644 index ca75277a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Chipset struct { - Uefi *Uefi `json:"Uefi,omitempty"` - - IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` - - BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` - - ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` - - ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` - - UseUtc bool `json:"UseUtc,omitempty"` - - // LinuxKernelDirect - Added in v2.2 Builds >=181117 - LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go deleted file mode 100644 index b4f9c315..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CloseHandle struct { - Handle string `json:"Handle,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go deleted file mode 100644 index 8bf8cab6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// ComPort specifies the named pipe that will be used for the port, with empty string indicating a 
disconnected port. -type ComPort struct { - NamedPipe string `json:"NamedPipe,omitempty"` - - OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go deleted file mode 100644 index 10cea67e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ComputeSystem struct { - Owner string `json:"Owner,omitempty"` - - SchemaVersion *Version `json:"SchemaVersion,omitempty"` - - HostingSystemId string `json:"HostingSystemId,omitempty"` - - HostedSystem interface{} `json:"HostedSystem,omitempty"` - - Container *Container `json:"Container,omitempty"` - - VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` - - ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go deleted file mode 100644 index 1d5dfe68..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - "net/http" -) - -// contextKeys are used to identify the type of value in the context. -// Since these are string, it is possible to get a short description of the -// context key for logging and debugging using key.String(). - -type contextKey string - -func (c contextKey) String() string { - return "auth " + string(c) -} - -var ( - // ContextOAuth2 takes a oauth2.TokenSource as authentication for the request. - ContextOAuth2 = contextKey("token") - - // ContextBasicAuth takes BasicAuth as authentication for the request. - ContextBasicAuth = contextKey("basic") - - // ContextAccessToken takes a string oauth2 access token as authentication for the request. 
- ContextAccessToken = contextKey("accesstoken") - - // ContextAPIKey takes an APIKey as authentication for the request - ContextAPIKey = contextKey("apikey") -) - -// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth -type BasicAuth struct { - UserName string `json:"userName,omitempty"` - Password string `json:"password,omitempty"` -} - -// APIKey provides API key based authentication to a request passed via context using ContextAPIKey -type APIKey struct { - Key string - Prefix string -} - -type Configuration struct { - BasePath string `json:"basePath,omitempty"` - Host string `json:"host,omitempty"` - Scheme string `json:"scheme,omitempty"` - DefaultHeader map[string]string `json:"defaultHeader,omitempty"` - UserAgent string `json:"userAgent,omitempty"` - HTTPClient *http.Client -} - -func NewConfiguration() *Configuration { - cfg := &Configuration{ - BasePath: "https://localhost", - DefaultHeader: make(map[string]string), - UserAgent: "Swagger-Codegen/2.1.0/go", - } - return cfg -} - -func (c *Configuration) AddDefaultHeader(key string, value string) { - c.DefaultHeader[key] = value -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go deleted file mode 100644 index 68aa04a5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ConsoleSize struct { - Height int32 `json:"Height,omitempty"` - - Width int32 `json:"Width,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go deleted file mode 100644 index 39a54432..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Container struct { - GuestOs *GuestOs `json:"GuestOs,omitempty"` - - Storage *Storage `json:"Storage,omitempty"` - - MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` - - MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` - - Memory *Memory `json:"Memory,omitempty"` - - Processor *Processor `json:"Processor,omitempty"` - - Networking *Networking `json:"Networking,omitempty"` - - HvSocket *HvSocket `json:"HvSocket,omitempty"` - - ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` - - RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - - AssignedDevices []Device `json:"AssignedDevices,omitempty"` - - AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go deleted file mode 100644 index 495c6ebc..00000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardAddInstanceRequest struct { - Id string `json:"Id,omitempty"` - CredentialSpec string `json:"CredentialSpec,omitempty"` - Transport string `json:"Transport,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go deleted file mode 100644 index 1ed4c008..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardHvSocketServiceConfig struct { - ServiceId string `json:"ServiceId,omitempty"` - ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go deleted file mode 100644 index d7ebd0fc..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardInstance struct { - Id string `json:"Id,omitempty"` - CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` - HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go deleted file mode 100644 index 71005b09..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardModifyOperation string - -const ( - AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" - RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go deleted file mode 
100644 index 952cda49..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardOperationRequest struct { - Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` - OperationDetails interface{} `json:"OperationDetails,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go deleted file mode 100644 index 32e5a3be..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardRemoveInstanceRequest struct { - Id string `json:"Id,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go deleted file mode 100644 index 0f8f6443..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardState struct { - - // Authentication cookie for calls to a Container Credential Guard instance. - Cookie string `json:"Cookie,omitempty"` - - // Name of the RPC endpoint of the Container Credential Guard instance. - RpcEndpoint string `json:"RpcEndpoint,omitempty"` - - // Transport used for the configured Container Credential Guard instance. - Transport string `json:"Transport,omitempty"` - - // Credential spec used for the configured Container Credential Guard instance. 
- CredentialSpec string `json:"CredentialSpec,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go deleted file mode 100644 index ea306fa2..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerCredentialGuardSystemInfo struct { - Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go deleted file mode 100644 index 1fd7ca5d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// memory usage as viewed from within the container -type ContainerMemoryInformation struct { - TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` - - TotalUsage int32 `json:"TotalUsage,omitempty"` - - CommittedBytes int32 `json:"CommittedBytes,omitempty"` - - SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` - - CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` - - PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go deleted file mode 100644 index 90332a51..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines -type CpuGroup struct { - Id string `json:"Id,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go deleted file mode 100644 index 8794961b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CpuGroupAffinity struct { - LogicalProcessorCount int32 `json:"LogicalProcessorCount,omitempty"` - LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` -} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go deleted file mode 100644 index 0be0475d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CpuGroupConfig struct { - GroupId string `json:"GroupId,omitempty"` - Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` - GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` - // Hypervisor CPU group IDs exposed to clients - HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go deleted file mode 100644 index 3ace0ccc..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Structure used to return cpu groups for a Service property query -type CpuGroupConfigurations struct { - CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go deleted file mode 100644 index 7d897807..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CPUGroupOperation string - -const ( - CreateGroup CPUGroupOperation = "CreateGroup" - DeleteGroup CPUGroupOperation = "DeleteGroup" - SetProperty CPUGroupOperation = "SetProperty" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go deleted file mode 100644 index bbad6a2c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type CpuGroupProperty struct { - PropertyCode uint32 `json:"PropertyCode,omitempty"` - PropertyValue uint32 `json:"PropertyValue,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go deleted file mode 100644 index 91a8278f..00000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Create group operation settings -type CreateGroupOperation struct { - GroupId string `json:"GroupId,omitempty"` - LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` - LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go deleted file mode 100644 index 134bd988..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Delete group operation settings -type DeleteGroupOperation struct { - GroupId string `json:"GroupId,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go deleted file mode 100644 index 31c4538a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceType string - -const ( - ClassGUID DeviceType = "ClassGuid" - DeviceInstanceID DeviceType = "DeviceInstance" - GPUMirror DeviceType = "GpuMirror" -) - -type Device struct { - // The type of device to assign to the container. - Type DeviceType `json:"Type,omitempty"` - // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. - InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` - // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
- LocationPath string `json:"LocationPath,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go deleted file mode 100644 index e985d96d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Devices struct { - ComPorts map[string]ComPort `json:"ComPorts,omitempty"` - - Scsi map[string]Scsi `json:"Scsi,omitempty"` - - VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` - - NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` - - VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` - - Keyboard *Keyboard `json:"Keyboard,omitempty"` - - Mouse *Mouse `json:"Mouse,omitempty"` - - HvSocket *HvSocket2 `json:"HvSocket,omitempty"` - - EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` - - GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` - - VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` - - Plan9 *Plan9 `json:"Plan9,omitempty"` - - Battery *Battery `json:"Battery,omitempty"` - - FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` - - SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` - - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - VirtualPci map[string]VirtualPciDevice `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go deleted file mode 100644 index 85450c41..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type EnhancedModeVideo struct { - ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go deleted file mode 100644 index fe86cab6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type FlexibleIoDevice struct { - EmulatorId string `json:"EmulatorId,omitempty"` - - HostingModel string `json:"HostingModel,omitempty"` - - Configuration []string `json:"Configuration,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go deleted file mode 100644 index 7db29495..00000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestConnection struct { - - // Use Vsock rather than Hyper-V sockets to communicate with the guest service. - UseVsock bool `json:"UseVsock,omitempty"` - - // Don't disconnect the guest connection when pausing the virtual machine. - UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go deleted file mode 100644 index 8a369bab..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Information about the guest. -type GuestConnectionInfo struct { - - // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. - SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` - - ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` - - GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go deleted file mode 100644 index af828004..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestCrashReporting struct { - WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go deleted file mode 100644 index 8838519a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestOs struct { - HostName string `json:"HostName,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go deleted file mode 100644 index ef1eec88..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen 
https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type GuestState struct { - - // The path to an existing file uses for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. - GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` - - // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. - RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` - - // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. - ForceTransientState bool `json:"ForceTransientState,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go deleted file mode 100644 index 2238ce53..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Structure used to request a service processor modification -type HostProcessorModificationRequest struct { - Operation CPUGroupOperation `json:"Operation,omitempty"` - OperationDetails interface{} `json:"OperationDetails,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go deleted file mode 100644 index ea3084bc..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type HostedSystem struct { - SchemaVersion *Version `json:"SchemaVersion,omitempty"` - - Container *Container `json:"Container,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go deleted file mode 100644 index 23b2ee9e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type HvSocket struct { - Config *HvSocketSystemConfig `json:"Config,omitempty"` - - EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go deleted file mode 100644 index a017691f..00000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// HvSocket configuration for a VM -type HvSocket2 struct { - HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go deleted file mode 100644 index 84c11b93..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// This class defines address settings applied to a VM -// by the GCS every time a VM starts or restores. -type HvSocketAddress struct { - LocalAddress string `json:"LocalAddress,omitempty"` - ParentAddress string `json:"ParentAddress,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go deleted file mode 100644 index ecd9f7fb..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type HvSocketServiceConfig struct { - - // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. - BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` - - // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. - ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` - - // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors - AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` - - // Disabled controls whether the HvSocket service is accepting connection requests. - // This set to true will make the service refuse all incoming connections as well as cancel - // any connections already established. The service itself will still be active however - // and can be re-enabled at a future time. 
- Disabled bool `json:"Disabled,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go deleted file mode 100644 index 69f4f9d3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. -type HvSocketSystemConfig struct { - - // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). - DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` - - // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. - DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` - - ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go deleted file mode 100644 index a614d63b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type InterruptModerationName string - -// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
-const ( - DefaultName InterruptModerationName = "Default" - AdaptiveName InterruptModerationName = "Adaptive" - OffName InterruptModerationName = "Off" - LowName InterruptModerationName = "Low" - MediumName InterruptModerationName = "Medium" - HighName InterruptModerationName = "High" -) - -type InterruptModerationValue uint32 - -const ( - DefaultValue InterruptModerationValue = iota - AdaptiveValue - OffValue - LowValue InterruptModerationValue = 100 - MediumValue InterruptModerationValue = 200 - HighValue InterruptModerationValue = 300 -) - -var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ - DefaultValue: DefaultName, - AdaptiveValue: AdaptiveName, - OffValue: OffName, - LowValue: LowName, - MediumValue: MediumName, - HighValue: HighName, -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go deleted file mode 100644 index 2a55cc37..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type IovSettings struct { - // The weight assigned to this port for I/O virtualization (IOV) offloading. - // Setting this to 0 disables IOV offloading. - OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` - - // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. - QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` - - // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
- InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go deleted file mode 100644 index 3d3fa3b1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Keyboard struct { -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go deleted file mode 100644 index 176c49d4..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Layer struct { - Id string `json:"Id,omitempty"` - - Path string `json:"Path,omitempty"` - - PathType string `json:"PathType,omitempty"` - - // Unspecified defaults to Enabled - Cache string `json:"Cache,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go deleted file mode 100644 index 0ab6c280..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.2 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type LinuxKernelDirect struct { - KernelFilePath string `json:"KernelFilePath,omitempty"` - - InitRdPath string `json:"InitRdPath,omitempty"` - - KernelCmdLine string `json:"KernelCmdLine,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go deleted file mode 100644 index 2e3aa5e1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type LogicalProcessor struct { - LpIndex uint32 `json:"LpIndex,omitempty"` - NodeNumber uint8 `json:"NodeNumber,omitempty"` - PackageId uint32 `json:"PackageId,omitempty"` - CoreId uint32 `json:"CoreId,omitempty"` - RootVpIndex int32 `json:"RootVpIndex,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go deleted file mode 100644 index 9b86a404..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided 
(generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type MappedDirectory struct { - HostPath string `json:"HostPath,omitempty"` - - HostPathType string `json:"HostPathType,omitempty"` - - ContainerPath string `json:"ContainerPath,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go deleted file mode 100644 index 208074e9..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type MappedPipe struct { - ContainerPipeName string `json:"ContainerPipeName,omitempty"` - - HostPath string `json:"HostPath,omitempty"` - - HostPathType string `json:"HostPathType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go deleted file mode 100644 index 30749c67..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Memory struct { - SizeInMB uint64 `json:"SizeInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go deleted file mode 100644 index 71224c75..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Memory2 struct { - SizeInMB uint64 `json:"SizeInMB,omitempty"` - - AllowOvercommit bool `json:"AllowOvercommit,omitempty"` - - EnableHotHint bool `json:"EnableHotHint,omitempty"` - - EnableColdHint bool `json:"EnableColdHint,omitempty"` - - EnableEpf bool `json:"EnableEpf,omitempty"` - - // EnableDeferredCommit is private in the schema. If regenerated need to add back. - EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` - - // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed - // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by - // the guest operating system). - EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` - - // LowMmioGapInMB is the low MMIO region allocated below 4GB. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` - - // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and - // size). 
- // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` - - // HighMmioGapInMB is the high MMIO region. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go deleted file mode 100644 index 811779b0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type MemoryInformationForVm struct { - VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` - - VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` - - VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go deleted file mode 100644 index 906ba597..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Memory runtime statistics -type MemoryStats struct { - MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` - - MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` - - MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go deleted file mode 100644 index 8dbe40b3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ContainerDefinitionDevice struct { - DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go deleted file mode 100644 index 8fe89f92..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceCategory struct { - Name string `json:"name,omitempty"` - InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go deleted file mode 100644 index a62568d8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceExtension struct { - DeviceCategory *DeviceCategory `json:"device_category,omitempty"` - Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go deleted file mode 100644 index a7410feb..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceInstance struct { - Id string `json:"id,omitempty"` - LocationPath string `json:"location_path,omitempty"` - PortName string `json:"port_name,omitempty"` - InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go deleted file mode 100644 index 35536406..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceNamespace struct { - RequiresDriverstore bool `json:"requires_driverstore,omitempty"` - DeviceCategory []DeviceCategory `json:"device_category,omitempty"` - DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go deleted file mode 100644 index 7be98b54..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type InterfaceClass struct { - Type_ string `json:"type,omitempty"` - Identifier string `json:"identifier,omitempty"` - Recurse bool `json:"recurse,omitempty"` -} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go deleted file mode 100644 index 3ab9cf1e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type DeviceExtensionNamespace struct { - Ob *ObjectNamespace `json:"ob,omitempty"` - Device *DeviceNamespace `json:"device,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go deleted file mode 100644 index d2f51b3b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ObjectDirectory struct { - Name string `json:"name,omitempty"` - Clonesd string `json:"clonesd,omitempty"` - Shadow string `json:"shadow,omitempty"` - Symlink []ObjectSymlink `json:"symlink,omitempty"` - Objdir []ObjectDirectory `json:"objdir,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go deleted file mode 100644 index 47dfb55b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ObjectNamespace struct { - Shadow string `json:"shadow,omitempty"` - Symlink []ObjectSymlink `json:"symlink,omitempty"` - Objdir []ObjectDirectory `json:"objdir,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go deleted file mode 100644 index 8867ebe5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ObjectSymlink struct { - Name string `json:"name,omitempty"` - Path string `json:"path,omitempty"` - Scope string `json:"scope,omitempty"` - Pathtoclone string `json:"pathtoclone,omitempty"` - AccessMask int32 `json:"access_mask,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go deleted file mode 100644 index 1384ed88..00000000 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ModificationRequest struct { - PropertyType PropertyType `json:"PropertyType,omitempty"` - Settings interface{} `json:"Settings,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go deleted file mode 100644 index d29455a3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ModifySettingRequest struct { - ResourcePath string `json:"ResourcePath,omitempty"` - - RequestType string `json:"RequestType,omitempty"` - - Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated - - GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go deleted file mode 100644 index ccf8b938..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Mouse struct { -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go deleted file mode 100644 index 7408abd3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type NetworkAdapter struct { - EndpointId string `json:"EndpointId,omitempty"` - MacAddress string `json:"MacAddress,omitempty"` - // The I/O virtualization (IOV) offloading configuration. 
- IovSettings *IovSettings `json:"IovSettings,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go deleted file mode 100644 index e5ea187a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Networking struct { - AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` - - DnsSearchList string `json:"DnsSearchList,omitempty"` - - NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` - - // Guid in windows; string in linux - Namespace string `json:"Namespace,omitempty"` - - NetworkAdapters []string `json:"NetworkAdapters,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go deleted file mode 100644 index d96c9501..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Notification data that is indicated to components running in the Virtual Machine. -type PauseNotification struct { - Reason string `json:"Reason,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go deleted file mode 100644 index 21707a88..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Options for HcsPauseComputeSystem -type PauseOptions struct { - SuspensionLevel string `json:"SuspensionLevel,omitempty"` - - HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go deleted file mode 100644 index 29d8c801..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Plan9 struct { - Shares []Plan9Share `json:"Shares,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go deleted file mode 100644 index 41f8fdea..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go +++ /dev/null @@ 
-1,34 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Plan9Share struct { - Name string `json:"Name,omitempty"` - - // The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol. - AccessName string `json:"AccessName,omitempty"` - - Path string `json:"Path,omitempty"` - - Port int32 `json:"Port,omitempty"` - - // Flags are marked private. Until they are exported correctly - // - // ReadOnly 0x00000001 - // LinuxMetadata 0x00000004 - // CaseSensitive 0x00000008 - Flags int32 `json:"Flags,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` - - UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` - - AllowedFiles []string `json:"AllowedFiles,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go deleted file mode 100644 index e9a662dd..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - "time" -) - -// Information about a process running in a container -type ProcessDetails struct { - ProcessId int32 `json:"ProcessId,omitempty"` - - ImageName string `json:"ImageName,omitempty"` - - CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` - - UserTime100ns int32 `json:"UserTime100ns,omitempty"` - - KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` - - MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` - - MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` - - MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go deleted file mode 100644 index e4ed095c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Passed to HcsRpc_ModifyProcess -type ProcessModifyRequest struct { - Operation string `json:"Operation,omitempty"` - - ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` - - CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go deleted file mode 100644 index 82b0d053..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * 
Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ProcessParameters struct { - ApplicationName string `json:"ApplicationName,omitempty"` - - CommandLine string `json:"CommandLine,omitempty"` - - // optional alternative to CommandLine, currently only supported by Linux GCS - CommandArgs []string `json:"CommandArgs,omitempty"` - - User string `json:"User,omitempty"` - - WorkingDirectory string `json:"WorkingDirectory,omitempty"` - - Environment map[string]string `json:"Environment,omitempty"` - - // if set, will run as low-privilege process - RestrictedToken bool `json:"RestrictedToken,omitempty"` - - // if set, ignore StdErrPipe - EmulateConsole bool `json:"EmulateConsole,omitempty"` - - CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` - - CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` - - CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` - - // height then width - ConsoleSize []int32 `json:"ConsoleSize,omitempty"` - - // if set, find an existing session for the user and create the process in it - UseExistingLogin bool `json:"UseExistingLogin,omitempty"` - - // if set, use the legacy console instead of conhost - UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go deleted file mode 100644 index ad9a4fa9..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Status of a process running in a container -type ProcessStatus struct { - ProcessId int32 `json:"ProcessId,omitempty"` - - Exited bool `json:"Exited,omitempty"` - - ExitCode int32 `json:"ExitCode,omitempty"` - - LastWaitResult int32 `json:"LastWaitResult,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go deleted file mode 100644 index bb24e88d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor struct { - Count int32 `json:"Count,omitempty"` - - Maximum int32 `json:"Maximum,omitempty"` - - Weight int32 `json:"Weight,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go deleted file mode 100644 index c64f335e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.5 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor2 struct { - Count int32 `json:"Count,omitempty"` - - Limit int32 
`json:"Limit,omitempty"` - - Weight int32 `json:"Weight,omitempty"` - - ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` - - // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. - CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go deleted file mode 100644 index 6157e252..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// CPU runtime statistics -type ProcessorStats struct { - TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` - - RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` - - RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go deleted file mode 100644 index 885156e7..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type ProcessorTopology struct { - LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` - LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go deleted file mode 100644 index 17558cba..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - v1 "github.com/containerd/cgroups/stats/v1" -) - -type Properties struct { - Id string `json:"Id,omitempty"` - - SystemType string `json:"SystemType,omitempty"` - - RuntimeOsType string `json:"RuntimeOsType,omitempty"` - - Name string `json:"Name,omitempty"` - - Owner string `json:"Owner,omitempty"` - - RuntimeId string `json:"RuntimeId,omitempty"` - - RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` - - State string `json:"State,omitempty"` - - Stopped bool `json:"Stopped,omitempty"` - - ExitType string `json:"ExitType,omitempty"` - - Memory *MemoryInformationForVm `json:"Memory,omitempty"` - - Statistics *Statistics `json:"Statistics,omitempty"` - - ProcessList []ProcessDetails `json:"ProcessList,omitempty"` - - TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` - - HostingSystemId string `json:"HostingSystemId,omitempty"` - - SharedMemoryRegionInfo []SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` - - GuestConnectionInfo 
*GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` - - // Metrics is not part of the API for HCS but this is used for LCOW v2 to - // return the full cgroup metrics from the guest. - Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go deleted file mode 100644 index d6d80df1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// By default the basic properties will be returned. This query provides a way to request specific properties. -type PropertyQuery struct { - PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go deleted file mode 100644 index 98f2c96e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type PropertyType string - -const ( - PTMemory PropertyType = "Memory" - PTGuestMemory PropertyType = "GuestMemory" - PTStatistics PropertyType = "Statistics" - PTProcessList PropertyType = "ProcessList" - PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" - PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" - PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
- PTGuestConnection PropertyType = "GuestConnection" - PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" - PTProcessorTopology PropertyType = "ProcessorTopology" - PTCPUGroup PropertyType = "CpuGroup" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go deleted file mode 100644 index 8d5f5c17..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RdpConnectionOptions struct { - AccessSids []string `json:"AccessSids,omitempty"` - - NamedPipe string `json:"NamedPipe,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go deleted file mode 100644 index 006906f6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RegistryChanges struct { - AddValues []RegistryValue `json:"AddValues,omitempty"` - - DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go deleted file mode 100644 index 26fde99c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RegistryKey struct { - Hive string `json:"Hive,omitempty"` - - Name string `json:"Name,omitempty"` - - Volatile bool `json:"Volatile,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go deleted file mode 100644 index 3f203176..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RegistryValue struct { - Key *RegistryKey `json:"Key,omitempty"` - - Name string `json:"Name,omitempty"` - - Type_ string `json:"Type,omitempty"` - - // One and only one value type must be set. 
- StringValue string `json:"StringValue,omitempty"` - - BinaryValue string `json:"BinaryValue,omitempty"` - - DWordValue int32 `json:"DWordValue,omitempty"` - - QWordValue int32 `json:"QWordValue,omitempty"` - - // Only used if RegistryValueType is CustomType The data is in BinaryValue - CustomType int32 `json:"CustomType,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go deleted file mode 100644 index 778ff587..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type RestoreState struct { - - // The path to the save state file to restore the system from. - SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` - - // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. - TemplateSystemId string `json:"TemplateSystemId,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go deleted file mode 100644 index e55fa1d9..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SaveOptions struct { - - // The type of save operation to be performed. - SaveType string `json:"SaveType,omitempty"` - - // The path to the file that will container the saved state. - SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go deleted file mode 100644 index bf253a47..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Scsi struct { - - // Map of attachments, where the key is the integer LUN number on the controller. 
- Attachments map[string]Attachment `json:"Attachments,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go deleted file mode 100644 index b8142ca6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import "encoding/json" - -type ServiceProperties struct { - // Changed Properties field to []json.RawMessage from []interface{} to avoid having to - // remarshal sp.Properties[n] and unmarshal into the type(s) we want. - Properties []json.RawMessage `json:"Properties,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go deleted file mode 100644 index df9baa92..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SharedMemoryConfiguration struct { - Regions []SharedMemoryRegion `json:"Regions,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go deleted file mode 100644 index 825b7186..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SharedMemoryRegion struct { - SectionName string `json:"SectionName,omitempty"` - - StartOffset int32 `json:"StartOffset,omitempty"` - - Length int32 `json:"Length,omitempty"` - - AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` - - HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go deleted file mode 100644 index f67b08eb..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type SharedMemoryRegionInfo struct { - SectionName string `json:"SectionName,omitempty"` - - GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go deleted file mode 100644 index 5eaf6a7f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Silo job information -type SiloProperties struct { - Enabled bool `json:"Enabled,omitempty"` - - JobName string `json:"JobName,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go deleted file mode 100644 index ba7a6b39..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -import ( - "time" -) - -// Runtime statistics for a container -type Statistics struct { - Timestamp time.Time `json:"Timestamp,omitempty"` - - ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` - - Uptime100ns uint64 `json:"Uptime100ns,omitempty"` - - Processor *ProcessorStats `json:"Processor,omitempty"` - - Memory *MemoryStats `json:"Memory,omitempty"` - - Storage *StorageStats `json:"Storage,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go deleted file mode 100644 index 2627af91..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Storage struct { - - // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. - Layers []Layer `json:"Layers,omitempty"` - - // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. 
- Path string `json:"Path,omitempty"` - - QoS *StorageQoS `json:"QoS,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go deleted file mode 100644 index 9c5e6eb5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type StorageQoS struct { - IopsMaximum int32 `json:"IopsMaximum,omitempty"` - - BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go deleted file mode 100644 index 4f042ffd..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// Storage runtime statistics -type StorageStats struct { - ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` - - ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` - - WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` - - WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go deleted file mode 100644 index 83486994..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Topology struct { - Memory *Memory2 `json:"Memory,omitempty"` - - Processor *Processor2 `json:"Processor,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go deleted file mode 100644 index 0e48ece5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Uefi struct { - EnableDebugger bool `json:"EnableDebugger,omitempty"` - - SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` - - BootThis *UefiBootEntry `json:"BootThis,omitempty"` - - Console string `json:"Console,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go deleted file mode 100644 index 3ab409d8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go +++ /dev/null 
@@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type UefiBootEntry struct { - DeviceType string `json:"DeviceType,omitempty"` - - DevicePath string `json:"DevicePath,omitempty"` - - DiskNumber int32 `json:"DiskNumber,omitempty"` - - OptionalData string `json:"OptionalData,omitempty"` - - VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go deleted file mode 100644 index 2abfccca..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Version struct { - Major int32 `json:"Major,omitempty"` - - Minor int32 `json:"Minor,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go deleted file mode 100644 index ec5d0fb9..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VideoMonitor struct { - HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` - - VerticalResolution int32 `json:"VerticalResolution,omitempty"` - - ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go deleted file mode 100644 index 2d22b1bc..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualMachine struct { - - // StopOnReset is private in the schema. If regenerated need to put back. 
- StopOnReset bool `json:"StopOnReset,omitempty"` - - Chipset *Chipset `json:"Chipset,omitempty"` - - ComputeTopology *Topology `json:"ComputeTopology,omitempty"` - - Devices *Devices `json:"Devices,omitempty"` - - GuestState *GuestState `json:"GuestState,omitempty"` - - RestoreState *RestoreState `json:"RestoreState,omitempty"` - - RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - - StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` - - GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go deleted file mode 100644 index 91a3c83d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualNodeInfo struct { - VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` - - PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` - - VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` - - MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go deleted file mode 100644 index f5b7f3e3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualPMemController struct { - Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` - - MaximumCount uint32 `json:"MaximumCount,omitempty"` - - MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` - - Backing string `json:"Backing,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go deleted file mode 100644 index 70cf2d90..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualPMemDevice struct { - HostPath string `json:"HostPath,omitempty"` - - ReadOnly bool `json:"ReadOnly,omitempty"` - - ImageFormat string `json:"ImageFormat,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go deleted file mode 100644 index 9ef322f6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen 
https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualPMemMapping struct { - HostPath string `json:"HostPath,omitempty"` - ImageFormat string `json:"ImageFormat,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go deleted file mode 100644 index f5e05903..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.3 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// TODO: This is pre-release support in schema 2.3. Need to add build number -// docs when a public build with this is out. -type VirtualPciDevice struct { - Functions []VirtualPciFunction `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go deleted file mode 100644 index cedb7d18..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.3 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// TODO: This is pre-release support in schema 2.3. Need to add build number -// docs when a public build with this is out. 
-type VirtualPciFunction struct { - DeviceInstancePath string `json:",omitempty"` - - VirtualFunction uint16 `json:",omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go deleted file mode 100644 index 362df363..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualSmb struct { - Shares []VirtualSmbShare `json:"Shares,omitempty"` - - DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go deleted file mode 100644 index 915e9b63..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualSmbShare struct { - Name string `json:"Name,omitempty"` - - Path string `json:"Path,omitempty"` - - AllowedFiles []string `json:"AllowedFiles,omitempty"` - - Options *VirtualSmbShareOptions `json:"Options,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go deleted file mode 100644 index 75196bd8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VirtualSmbShareOptions struct { - ReadOnly bool `json:"ReadOnly,omitempty"` - - // convert exclusive access to shared read access - ShareRead bool `json:"ShareRead,omitempty"` - - // all opens will use cached I/O - CacheIo bool `json:"CacheIo,omitempty"` - - // disable oplock support - NoOplocks bool `json:"NoOplocks,omitempty"` - - // Acquire the backup privilege when attempting to open - TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` - - // Use the identity of the share root when opening - UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` - - // disable Direct Mapping - NoDirectmap bool `json:"NoDirectmap,omitempty"` - - // disable Byterange locks - NoLocks bool `json:"NoLocks,omitempty"` - - // disable Directory CHange Notifications - NoDirnotify bool `json:"NoDirnotify,omitempty"` - - // share is use for VM shared memory - VmSharedMemory bool `json:"VmSharedMemory,omitempty"` - - // allow access only to the files specified in AllowedFiles - RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` - - // disable all oplocks except Level II - ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` - - // Allow the host to reparse this base layer - 
ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` - - // Enable pseudo-oplocks - PseudoOplocks bool `json:"PseudoOplocks,omitempty"` - - // All opens will use non-cached IO - NonCacheIo bool `json:"NonCacheIo,omitempty"` - - // Enable pseudo directory change notifications - PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` - - // Block directory enumeration, renames, and deletes. - SingleFileMapping bool `json:"SingleFileMapping,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go deleted file mode 100644 index 8e1836dd..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type VmMemory struct { - AvailableMemory int32 `json:"AvailableMemory,omitempty"` - - AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` - - ReservedMemory uint64 `json:"ReservedMemory,omitempty"` - - AssignedMemory uint64 `json:"AssignedMemory,omitempty"` - - SlpActive bool `json:"SlpActive,omitempty"` - - BalancingEnabled bool `json:"BalancingEnabled,omitempty"` - - DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go deleted file mode 100644 index de1b9cf1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.4 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. -type ProcessorLimits struct { - // Maximum amount of host CPU resources that the virtual machine can use. - Limit uint64 `json:"Limit,omitempty"` - // Value describing the relative priority of this virtual machine compared to other virtual machines. - Weight uint64 `json:"Weight,omitempty"` - // Minimum amount of host CPU resources that the virtual machine is guaranteed. - Reservation uint64 `json:"Reservation,omitempty"` - // Provides the target maximum CPU frequency, in MHz, for a virtual machine. 
- MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go deleted file mode 100644 index 8ed7e566..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.1 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type WindowsCrashReporting struct { - DumpFileName string `json:"DumpFileName,omitempty"` - - MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go deleted file mode 100644 index a634dfc1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go +++ /dev/null @@ -1,49 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/vmcompute" -) - -// GetServiceProperties returns properties of the host compute service. -func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { - operation := "hcs::GetServiceProperties" - - queryb, err := json.Marshal(q) - if err != nil { - return nil, err - } - propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, &HcsError{Op: operation, Err: err, Events: events} - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &hcsschema.ServiceProperties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, err - } - return properties, nil -} - -// ModifyServiceSettings modifies settings of the host compute service. 
-func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { - operation := "hcs::ModifyServiceSettings" - - settingsJSON, err := json.Marshal(settings) - if err != nil { - return err - } - resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return &HcsError{Op: operation, Err: err, Events: events} - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go deleted file mode 100644 index a76f6b25..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ /dev/null @@ -1,815 +0,0 @@ -package hcs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/schema1" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/jobobject" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/timeout" - "github.com/Microsoft/hcsshim/internal/vmcompute" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -type System struct { - handleLock sync.RWMutex - handle vmcompute.HcsSystem - id string - callbackNumber uintptr - - closedWaitOnce sync.Once - waitBlock chan struct{} - waitError error - exitError error - os, typ, owner string - startTime time.Time -} - -func newSystem(id string) *System { - return &System{ - id: id, - waitBlock: make(chan struct{}), - } -} - -// Implementation detail for silo naming, this should NOT be relied upon very heavily. -func siloNameFmt(containerID string) string { - return fmt.Sprintf(`\Container_%s`, containerID) -} - -// CreateComputeSystem creates a new compute system with the given configuration but does not start it. -func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { - operation := "hcs::CreateComputeSystem" - - // hcsCreateComputeSystemContext is an async operation. Start the outer span - // here to measure the full create time. - ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", id)) - - computeSystem := newSystem(id) - - hcsDocumentB, err := json.Marshal(hcsDocumentInterface) - if err != nil { - return nil, err - } - - hcsDocument := string(hcsDocumentB) - - var ( - identity syscall.Handle - resultJSON string - createError error - ) - computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) - if createError == nil || IsPending(createError) { - defer func() { - if err != nil { - computeSystem.Close() - } - }() - if err = computeSystem.registerCallback(ctx); err != nil { - // Terminate the compute system if it still exists. We're okay to - // ignore a failure here. - _ = computeSystem.Terminate(ctx) - return nil, makeSystemError(computeSystem, operation, err, nil) - } - } - - events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) - if err != nil { - if err == ErrTimeout { - // Terminate the compute system if it still exists. We're okay to - // ignore a failure here. 
- _ = computeSystem.Terminate(ctx) - } - return nil, makeSystemError(computeSystem, operation, err, events) - } - go computeSystem.waitBackground() - if err = computeSystem.getCachedProperties(ctx); err != nil { - return nil, err - } - return computeSystem, nil -} - -// OpenComputeSystem opens an existing compute system by ID. -func OpenComputeSystem(ctx context.Context, id string) (*System, error) { - operation := "hcs::OpenComputeSystem" - - computeSystem := newSystem(id) - handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - computeSystem.handle = handle - defer func() { - if err != nil { - computeSystem.Close() - } - }() - if err = computeSystem.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go computeSystem.waitBackground() - if err = computeSystem.getCachedProperties(ctx); err != nil { - return nil, err - } - return computeSystem, nil -} - -func (computeSystem *System) getCachedProperties(ctx context.Context) error { - props, err := computeSystem.Properties(ctx) - if err != nil { - return err - } - computeSystem.typ = strings.ToLower(props.SystemType) - computeSystem.os = strings.ToLower(props.RuntimeOSType) - computeSystem.owner = strings.ToLower(props.Owner) - if computeSystem.os == "" && computeSystem.typ == "container" { - // Pre-RS5 HCS did not return the OS, but it only supported containers - // that ran Windows. - computeSystem.os = "windows" - } - return nil -} - -// OS returns the operating system of the compute system, "linux" or "windows". -func (computeSystem *System) OS() string { - return computeSystem.os -} - -// IsOCI returns whether processes in the compute system should be created via -// OCI. -func (computeSystem *System) IsOCI() bool { - return computeSystem.os == "linux" && computeSystem.typ == "container" -} - -// GetComputeSystems gets a list of the compute systems on the system that match the query -func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { - operation := "hcs::GetComputeSystems" - - queryb, err := json.Marshal(q) - if err != nil { - return nil, err - } - - computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, &HcsError{Op: operation, Err: err, Events: events} - } - - if computeSystemsJSON == "" { - return nil, ErrUnexpectedValue - } - computeSystems := []schema1.ContainerProperties{} - if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { - return nil, err - } - - return computeSystems, nil -} - -// Start synchronously starts the computeSystem. -func (computeSystem *System) Start(ctx context.Context) (err error) { - operation := "hcs::System::Start" - - // hcsStartComputeSystemContext is an async operation. Start the outer span - // here to measure the full start time. 
- ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - computeSystem.startTime = time.Now() - return nil -} - -// ID returns the compute system's identifier. -func (computeSystem *System) ID() string { - return computeSystem.id -} - -// Shutdown requests a compute system shutdown. -func (computeSystem *System) Shutdown(ctx context.Context) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Shutdown" - - if computeSystem.handle == 0 { - return nil - } - - resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") - events := processHcsResult(ctx, resultJSON) - switch err { - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: - return makeSystemError(computeSystem, operation, err, events) - } - return nil -} - -// Terminate requests a compute system terminate. -func (computeSystem *System) Terminate(ctx context.Context) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Terminate" - - if computeSystem.handle == 0 { - return nil - } - - resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") - events := processHcsResult(ctx, resultJSON) - switch err { - case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: - default: - return makeSystemError(computeSystem, operation, err, events) - } - return nil -} - -// waitBackground waits for the compute system exit notification. Once received -// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. -// -// This MUST be called exactly once per `computeSystem.handle` but `Wait` is -// safe to call multiple times. -func (computeSystem *System) waitBackground() { - operation := "hcs::System::waitBackground" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { - case nil: - log.G(ctx).Debug("system exited") - case ErrVmcomputeUnexpectedExit: - log.G(ctx).Debug("unexpected system exit") - computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) - err = nil - default: - err = makeSystemError(computeSystem, operation, err, nil) - } - computeSystem.closedWaitOnce.Do(func() { - computeSystem.waitError = err - close(computeSystem.waitBlock) - }) - oc.SetSpanStatus(span, err) -} - -func (computeSystem *System) WaitChannel() <-chan struct{} { - return computeSystem.waitBlock -} - -func (computeSystem *System) WaitError() error { - return computeSystem.waitError -} - -// Wait synchronously waits for the compute system to shutdown or terminate. 
If -// the compute system has already exited returns the previous error (if any). -func (computeSystem *System) Wait() error { - <-computeSystem.WaitChannel() - return computeSystem.WaitError() -} - -// ExitError returns an error describing the reason the compute system terminated. -func (computeSystem *System) ExitError() error { - select { - case <-computeSystem.waitBlock: - if computeSystem.waitError != nil { - return computeSystem.waitError - } - return computeSystem.exitError - default: - return errors.New("container not exited") - } -} - -// Properties returns the requested container properties targeting a V1 schema container. -func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Properties" - - queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - properties := &schema1.ContainerProperties{} - if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - return properties, nil -} - -// queryInProc handles querying for container properties without reaching out to HCS. `props` -// will be updated to contain any data returned from the queries present in `types`. If any properties -// failed to be queried they will be tallied up and returned in as the first return value. Failures on -// query are NOT considered errors; the only failure case for this method is if the containers job object -// cannot be opened. -func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) { - // In the future we can make use of some new functionality in the HCS that allows you - // to pass a job object for HCS to use for the container. Currently, the only way we'll - // be able to open the job/silo is if we're running as SYSTEM. - jobOptions := &jobobject.Options{ - UseNTVariant: true, - Name: siloNameFmt(computeSystem.id), - } - job, err := jobobject.Open(ctx, jobOptions) - if err != nil { - return nil, err - } - defer job.Close() - - var fallbackQueryTypes []hcsschema.PropertyType - for _, propType := range types { - switch propType { - case hcsschema.PTStatistics: - // Handle a bad caller asking for the same type twice. No use in re-querying if this is - // filled in already. - if props.Statistics == nil { - props.Statistics, err = computeSystem.statisticsInProc(job) - if err != nil { - log.G(ctx).WithError(err).Warn("failed to get statistics in-proc") - - fallbackQueryTypes = append(fallbackQueryTypes, propType) - } - } - default: - fallbackQueryTypes = append(fallbackQueryTypes, propType) - } - } - - return fallbackQueryTypes, nil -} - -// statisticsInProc emulates what HCS does to grab statistics for a given container with a small -// change to make grabbing the private working set total much more efficient. 
-func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) { - // Start timestamp for these stats before we grab them to match HCS - timestamp := time.Now() - - memInfo, err := job.QueryMemoryStats() - if err != nil { - return nil, err - } - - processorInfo, err := job.QueryProcessorStats() - if err != nil { - return nil, err - } - - storageInfo, err := job.QueryStorageStats() - if err != nil { - return nil, err - } - - // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation - // with the class SystemProcessInformation which returns an array containing system information for *every* - // process running on the machine. They then grab the pids that are running in the container and filter down - // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't - // work well as performance should get worse if more processess are running on the machine in general and not - // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored - // as well which isn't great and is wasted work to fetch. - // - // HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private - // working set ourselves and ask for everything else seperately. The optimization we can make here is - // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating - // the private working set in a more efficient manner by: - // - // 1. Find the pids running in the silo - // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access) - // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters - // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2. - privateWorkingSet, err := job.QueryPrivateWorkingSet() - if err != nil { - return nil, err - } - - return &hcsschema.Statistics{ - Timestamp: timestamp, - ContainerStartTime: computeSystem.startTime, - Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100, - Memory: &hcsschema.MemoryStats{ - MemoryUsageCommitBytes: memInfo.JobMemory, - MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed, - MemoryUsagePrivateWorkingSetBytes: privateWorkingSet, - }, - Processor: &hcsschema.ProcessorStats{ - RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime), - RuntimeUser100ns: uint64(processorInfo.TotalUserTime), - TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime), - }, - Storage: &hcsschema.StorageStats{ - ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount), - ReadSizeBytes: storageInfo.ReadStats.TotalSize, - WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount), - WriteSizeBytes: storageInfo.WriteStats.TotalSize, - }, - }, nil -} - -// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types. 
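For context on the property-query path above, a minimal sketch of how a caller inside the hcsshim module (these packages are internal and not importable from outside it) might request container statistics through PropertiesV2; the container ID is hypothetical and error handling is abbreviated:

package example

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hcs"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)

// printPrivateWorkingSet is a sketch only: it opens an existing compute system
// and asks for statistics, which for containers first tries the in-proc
// job-object query shown above and falls back to HCS on failure.
func printPrivateWorkingSet(ctx context.Context, containerID string) error {
	system, err := hcs.OpenComputeSystem(ctx, containerID)
	if err != nil {
		return err
	}
	defer system.Close()

	props, err := system.PropertiesV2(ctx, hcsschema.PTStatistics)
	if err != nil {
		return err
	}
	if props.Statistics != nil && props.Statistics.Memory != nil {
		fmt.Println("private working set bytes:",
			props.Statistics.Memory.MemoryUsagePrivateWorkingSetBytes)
	}
	return nil
}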
-func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { - operation := "hcs::System::PropertiesV2" - - queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - if propertiesJSON == "" { - return nil, ErrUnexpectedValue - } - props := &hcsschema.Properties{} - if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - - return props, nil -} - -// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system. -func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - // Let HCS tally up the total for VM based queries instead of querying ourselves. - if computeSystem.typ != "container" { - return computeSystem.hcsPropertiesV2Query(ctx, types) - } - - // Define a starter Properties struct with the default fields returned from every - // query. Owner is only returned from Statistics but it's harmless to include. - properties := &hcsschema.Properties{ - Id: computeSystem.id, - SystemType: computeSystem.typ, - RuntimeOsType: computeSystem.os, - Owner: computeSystem.owner, - } - - logEntry := log.G(ctx) - // First lets try and query ourselves without reaching to HCS. If any of the queries fail - // we'll take note and fallback to querying HCS for any of the failed types. - fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types) - if err == nil && len(fallbackTypes) == 0 { - return properties, nil - } else if err != nil { - logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) - fallbackTypes = types - } - - logEntry.WithFields(logrus.Fields{ - logfields.ContainerID: computeSystem.id, - "propertyTypes": fallbackTypes, - }).Info("falling back to HCS for property type queries") - - hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes) - if err != nil { - return nil, err - } - - // Now add in anything that we might have successfully queried in process. - if properties.Statistics != nil { - hcsProperties.Statistics = properties.Statistics - hcsProperties.Owner = properties.Owner - } - - // For future support for querying processlist in-proc as well. - if properties.ProcessList != nil { - hcsProperties.ProcessList = properties.ProcessList - } - - return hcsProperties, nil -} - -// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Pause(ctx context.Context) (err error) { - operation := "hcs::System::Pause" - - // hcsPauseComputeSystemContext is an async peration. Start the outer span - // here to measure the full pause time. 
- ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. -func (computeSystem *System) Resume(ctx context.Context) (err error) { - operation := "hcs::System::Resume" - - // hcsResumeComputeSystemContext is an async operation. Start the outer span - // here to measure the full restore time. - ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -// Save the compute system -func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { - operation := "hcs::System::Save" - - // hcsSaveComputeSystemContext is an async peration. Start the outer span - // here to measure the full save time. 
- ctx, span := trace.StartSpan(ctx, operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - saveOptions, err := json.Marshal(options) - if err != nil { - return err - } - - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) - events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} - -func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - if computeSystem.handle == 0 { - return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - configurationb, err := json.Marshal(c) - if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, err, nil) - } - - configuration := string(configurationb) - processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, nil, makeSystemError(computeSystem, operation, err, events) - } - - log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") - return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil -} - -// CreateProcess launches a new process within the computeSystem. -func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { - operation := "hcs::System::CreateProcess" - process, processInfo, err := computeSystem.createProcess(ctx, operation, c) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - process.Close() - } - }() - - pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - process.stdin = pipes[0] - process.stdout = pipes[1] - process.stderr = pipes[2] - process.hasCachedStdio = true - - if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go process.waitBackground() - - return process, nil -} - -// OpenProcess gets an interface to an existing process within the computeSystem. 
-func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::OpenProcess" - - if computeSystem.handle == 0 { - return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return nil, makeSystemError(computeSystem, operation, err, events) - } - - process := newProcess(processHandle, pid, computeSystem) - if err = process.registerCallback(ctx); err != nil { - return nil, makeSystemError(computeSystem, operation, err, nil) - } - go process.waitBackground() - - return process, nil -} - -// Close cleans up any state associated with the compute system but does not terminate or wait for it. -func (computeSystem *System) Close() (err error) { - operation := "hcs::System::Close" - ctx, span := trace.StartSpan(context.Background(), operation) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) - - computeSystem.handleLock.Lock() - defer computeSystem.handleLock.Unlock() - - // Don't double free this - if computeSystem.handle == 0 { - return nil - } - - if err = computeSystem.unregisterCallback(ctx); err != nil { - return makeSystemError(computeSystem, operation, err, nil) - } - - err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) - if err != nil { - return makeSystemError(computeSystem, operation, err, nil) - } - - computeSystem.handle = 0 - computeSystem.closedWaitOnce.Do(func() { - computeSystem.waitError = ErrAlreadyClosed - close(computeSystem.waitBlock) - }) - - return nil -} - -func (computeSystem *System) registerCallback(ctx context.Context) error { - callbackContext := ¬ificationWatcherContext{ - channels: newSystemChannels(), - systemID: computeSystem.id, - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = callbackContext - callbackMapLock.Unlock() - - callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) - if err != nil { - return err - } - callbackContext.handle = callbackHandle - computeSystem.callbackNumber = callbackNumber - - return nil -} - -func (computeSystem *System) unregisterCallback(ctx context.Context) error { - callbackNumber := computeSystem.callbackNumber - - callbackMapLock.RLock() - callbackContext := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if callbackContext == nil { - return nil - } - - handle := callbackContext.handle - - if handle == 0 { - return nil - } - - // hcsUnregisterComputeSystemCallback has its own syncronization - // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
- err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) - if err != nil { - return err - } - - closeChannels(callbackContext.channels) - - callbackMapLock.Lock() - delete(callbackMap, callbackNumber) - callbackMapLock.Unlock() - - handle = 0 //nolint:ineffassign - - return nil -} - -// Modify the System by sending a request to HCS -func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { - computeSystem.handleLock.RLock() - defer computeSystem.handleLock.RUnlock() - - operation := "hcs::System::Modify" - - if computeSystem.handle == 0 { - return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) - } - - requestBytes, err := json.Marshal(config) - if err != nil { - return err - } - - requestJSON := string(requestBytes) - resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeSystemError(computeSystem, operation, err, events) - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go deleted file mode 100644 index 3342e5bb..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ /dev/null @@ -1,62 +0,0 @@ -package hcs - -import ( - "context" - "io" - "syscall" - - "github.com/Microsoft/go-winio" - diskutil "github.com/Microsoft/go-winio/vhd" - "github.com/Microsoft/hcsshim/computestorage" - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles -// if there is an error. -func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { - fs := make([]io.ReadWriteCloser, len(hs)) - for i, h := range hs { - if h != syscall.Handle(0) { - if err == nil { - fs[i], err = winio.MakeOpenFile(h) - } - if err != nil { - syscall.Close(h) - } - } - } - if err != nil { - for _, f := range fs { - if f != nil { - f.Close() - } - } - return nil, err - } - return fs, nil -} - -// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
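The CreateNTFSVHD helper described just above wraps VHDX creation, NTFS formatting, and handle cleanup in one call. A minimal usage sketch, assuming Windows and a caller inside the hcsshim module (the package is internal); the path and size are hypothetical:

package example

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// createScratch is a sketch only: it creates and NTFS-formats a 10 GB VHDX
// at a hypothetical path using the helper deleted in the hunk below.
func createScratch() {
	if err := hcs.CreateNTFSVHD(context.Background(), `C:\layers\scratch.vhdx`, 10); err != nil {
		log.Fatal(err)
	}
}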
-func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { - if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { - return errors.Wrap(err, "failed to create VHD") - } - - vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) - if err != nil { - return errors.Wrap(err, "failed to open VHD") - } - defer func() { - err2 := windows.CloseHandle(windows.Handle(vhd)) - if err == nil { - err = errors.Wrap(err2, "failed to close VHD") - } - }() - - if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { - return errors.Wrap(err, "failed to format VHD") - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go deleted file mode 100644 index db4e14fd..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ /dev/null @@ -1,68 +0,0 @@ -package hcs - -import ( - "context" - "time" - - "github.com/Microsoft/hcsshim/internal/log" -) - -func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { - events := processHcsResult(ctx, resultJSON) - if IsPending(err) { - return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) - } - - return events, err -} - -func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { - callbackMapLock.RLock() - if _, ok := callbackMap[callbackNumber]; !ok { - callbackMapLock.RUnlock() - log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") - return ErrHandleClose - } - channels := callbackMap[callbackNumber].channels - callbackMapLock.RUnlock() - - expectedChannel := channels[expectedNotification] - if expectedChannel == nil { - log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") - return ErrInvalidNotificationType - } - - var c <-chan time.Time - if timeout != nil { - timer := time.NewTimer(*timeout) - c = timer.C - defer timer.Stop() - } - - select { - case err, ok := <-expectedChannel: - if !ok { - return ErrHandleClose - } - return err - case err, ok := <-channels[hcsNotificationSystemExited]: - if !ok { - return ErrHandleClose - } - // If the expected notification is hcsNotificationSystemExited which of the two selects - // chosen is random. 
Return the raw error if hcsNotificationSystemExited is expected - if channels[hcsNotificationSystemExited] == expectedChannel { - return err - } - return ErrUnexpectedContainerExit - case _, ok := <-channels[hcsNotificationServiceDisconnect]: - if !ok { - return ErrHandleClose - } - // hcsNotificationServiceDisconnect should never be an expected notification - // it does not need the same handling as hcsNotificationSystemExited - return ErrUnexpectedProcessAbort - case <-c: - return ErrTimeout - } -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go deleted file mode 100644 index 921c2c85..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go +++ /dev/null @@ -1,47 +0,0 @@ -package hcserror - -import ( - "fmt" - "syscall" -) - -const ERROR_GEN_FAILURE = syscall.Errno(31) - -type HcsError struct { - title string - rest string - Err error -} - -func (e *HcsError) Error() string { - s := e.title - if len(s) > 0 && s[len(s)-1] != ' ' { - s += " " - } - s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) - if e.rest != "" { - if e.rest[0] != ' ' { - s += " " - } - s += e.rest - } - return s -} - -func New(err error, title, rest string) error { - // Pass through DLL errors directly since they do not originate from HCS. - if _, ok := err.(*syscall.DLLError); ok { - return err - } - return &HcsError{title, rest, err} -} - -func Win32FromError(err error) uint32 { - if herr, ok := err.(*HcsError); ok { - return Win32FromError(herr.Err) - } - if code, ok := err.(syscall.Errno); ok { - return uint32(code) - } - return uint32(ERROR_GEN_FAILURE) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go deleted file mode 100644 index b2e475f5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go +++ /dev/null @@ -1,23 +0,0 @@ -package hns - -import "fmt" - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go - -//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? 
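The //sys declaration above is the single native entry point (the HNSCall export of vmcompute.dll) behind all of the HNS wrappers deleted in the hunks that follow. A minimal sketch of how those wrappers are typically driven from inside the hcsshim module (the package is internal); the endpoint name is hypothetical:

package example

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim/internal/hns"
)

// lookupEndpoint is a sketch only: it lists HNS endpoints and filters by name
// using the GetHNSEndpointByName helper deleted further down.
func lookupEndpoint() {
	endpoint, err := hns.GetHNSEndpointByName("my-endpoint")
	if err != nil {
		// A missing endpoint surfaces as hns.EndpointNotFoundError.
		log.Fatal(err)
	}
	fmt.Println(endpoint.Id, endpoint.IPAddress)
}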
- -type EndpointNotFoundError struct { - EndpointName string -} - -func (e EndpointNotFoundError) Error() string { - return fmt.Sprintf("Endpoint %s not found", e.EndpointName) -} - -type NetworkNotFoundError struct { - NetworkName string -} - -func (e NetworkNotFoundError) Error() string { - return fmt.Sprintf("Network %s not found", e.NetworkName) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go deleted file mode 100644 index 7cf954c7..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ /dev/null @@ -1,338 +0,0 @@ -package hns - -import ( - "encoding/json" - "net" - "strings" - - "github.com/sirupsen/logrus" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - VirtualNetwork string `json:",omitempty"` - VirtualNetworkName string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress net.IP `json:",omitempty"` - IPv6Address net.IP `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - DNSDomain string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - GatewayAddressV6 string `json:",omitempty"` - EnableInternalDNS bool `json:",omitempty"` - DisableICC bool `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - IPv6PrefixLength uint8 `json:",omitempty"` - IsRemoteEndpoint bool `json:",omitempty"` - EnableLowMetric bool `json:",omitempty"` - Namespace *Namespace `json:",omitempty"` - EncapOverhead uint16 `json:",omitempty"` - SharedContainers []string `json:",omitempty"` -} - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest struct { - ContainerID string `json:"ContainerId,omitempty"` - SystemType SystemType `json:"SystemType"` - CompartmentID uint16 `json:"CompartmentId,omitempty"` - VirtualNICName string `json:"VirtualNicName,omitempty"` -} - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse struct { - Success bool - Error string -} - -// EndpointStats is the object that has stats for a given endpoint -type EndpointStats struct { - BytesReceived uint64 `json:"BytesReceived"` - BytesSent uint64 `json:"BytesSent"` - DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` - DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` - EndpointID string `json:"EndpointId"` - InstanceID string `json:"InstanceId"` - PacketsReceived uint64 `json:"PacketsReceived"` - PacketsSent uint64 `json:"PacketsSent"` -} - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - endpoint := &HNSEndpoint{} - err := hnsCall(method, "/endpoints/"+path, request, &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, 
error) { - var endpoint []HNSEndpoint - err := hnsCall("GET", "/endpoints/", "", &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID -func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { - var stats EndpointStats - err := hnsCall("GET", "/endpointstats/"+id, "", &stats) - if err != nil { - return nil, err - } - - return &stats, nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return HNSEndpointRequest("GET", endpointID, "") -} - -// GetHNSEndpointStats get the stats for a n Endpoint by ID -func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { - return hnsEndpointStatsRequest(endpointID) -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - hnsResponse, err := HNSListEndpointRequest() - if err != nil { - return nil, err - } - for _, hnsEndpoint := range hnsResponse { - if hnsEndpoint.Name == endpointName { - return &hnsEndpoint, nil - } - } - return nil, EndpointNotFoundError{EndpointName: endpointName} -} - -type endpointAttachInfo struct { - SharedContainers json.RawMessage `json:",omitempty"` -} - -func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { - attachInfo := endpointAttachInfo{} - err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo) - - // Return false allows us to just return the err - if err != nil { - return false, err - } - - if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) { - return true, nil - } - - return false, nil - -} - -// Create Endpoint by sending EndpointRequest to HNS. 
TODO: Create a separate HNS interface to place all these methods -func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { - operation := "Create" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - return HNSEndpointRequest("POST", "", string(jsonString)) -} - -// Delete Endpoint by sending EndpointRequest to HNS -func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { - operation := "Delete" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - return HNSEndpointRequest("DELETE", endpoint.Id, "") -} - -// Update Endpoint -func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { - operation := "Update" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) - - return endpoint, err -} - -// ApplyACLPolicy applies a set of ACL Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { - operation := "ApplyACLPolicy" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { - operation := "ApplyProxyPolicy" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ContainerAttach attaches an endpoint to container -func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { - operation := "ContainerAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - CompartmentID: compartmentID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// ContainerDetach detaches an endpoint from container -func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { - operation := "ContainerDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// HostAttach attaches a nic on the host -func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { - operation := "HostAttach" - title 
:= "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - CompartmentID: compartmentID, - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) - -} - -// HostDetach detaches a nic on the host -func (endpoint *HNSEndpoint) HostDetach() error { - operation := "HostDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// VirtualMachineNICAttach attaches a endpoint to a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { - operation := "VirtualMachineNicAttach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - VirtualNICName: virtualMachineNICName, - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// VirtualMachineNICDetach detaches a endpoint from a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { - operation := "VirtualMachineNicDetach" - title := "hcsshim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go deleted file mode 100644 index 2df4a57f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ /dev/null @@ -1,49 +0,0 @@ -package hns - -import ( - "encoding/json" - "fmt" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/sirupsen/logrus" -) - -func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { - var responseBuffer *uint16 - logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) - - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return nil, hcserror.New(err, "hnsCall ", "") - } - response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return nil, err - } - return hnsresponse, nil -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - hnsresponse, err := hnsCallRawResponse(method, path, request) - if err != nil { - return fmt.Errorf("failed during hnsCallRawResponse: %v", err) - } - if !hnsresponse.Success { - return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) - } - - if len(hnsresponse.Output) == 0 { 
- return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go deleted file mode 100644 index a8d8cc56..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go +++ /dev/null @@ -1,28 +0,0 @@ -package hns - -type HNSGlobals struct { - Version HNSVersion `json:"Version"` -} - -type HNSVersion struct { - Major int `json:"Major"` - Minor int `json:"Minor"` -} - -var ( - HNSVersion1803 = HNSVersion{Major: 7, Minor: 2} -) - -func GetHNSGlobals() (*HNSGlobals, error) { - var version HNSVersion - err := hnsCall("GET", "/globals/version", "", &version) - if err != nil { - return nil, err - } - - globals := &HNSGlobals{ - Version: version, - } - - return globals, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go deleted file mode 100644 index f12d3ab0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ /dev/null @@ -1,141 +0,0 @@ -package hns - -import ( - "encoding/json" - "errors" - "github.com/sirupsen/logrus" - "net" -) - -// Subnet is assoicated with a network and represents a list -// of subnets available to the network -type Subnet struct { - AddressPrefix string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` -} - -// MacPool is assoicated with a network and represents a list -// of macaddresses available to the network -type MacPool struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// HNSNetwork represents a network in HNS -type HNSNetwork struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - Type string `json:",omitempty"` - NetworkAdapterName string `json:",omitempty"` - SourceMac string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacPools []MacPool `json:",omitempty"` - Subnets []Subnet `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - DNSServerCompartment uint32 `json:",omitempty"` - ManagementIP string `json:",omitempty"` - AutomaticDNS bool `json:",omitempty"` -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - var network HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return &network, nil -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - var network []HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return network, nil -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return HNSNetworkRequest("GET", networkID, "") -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - hsnnetworks, err := HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - for _, hnsnetwork := range 
hsnnetworks { - if hnsnetwork.Name == networkName { - return &hnsnetwork, nil - } - } - return nil, NetworkNotFoundError{NetworkName: networkName} -} - -// Create Network by sending NetworkRequest to HNS. -func (network *HNSNetwork) Create() (*HNSNetwork, error) { - operation := "Create" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - for _, subnet := range network.Subnets { - if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { - return nil, errors.New("network create error, subnet has address prefix but no gateway specified") - } - } - - jsonString, err := json.Marshal(network) - if err != nil { - return nil, err - } - return HNSNetworkRequest("POST", "", string(jsonString)) -} - -// Delete Network by sending NetworkRequest to HNS -func (network *HNSNetwork) Delete() (*HNSNetwork, error) { - operation := "Delete" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - return HNSNetworkRequest("DELETE", network.Id, "") -} - -// Creates an endpoint on the Network. -func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { - return &HNSEndpoint{ - VirtualNetwork: network.Id, - IPAddress: ipAddress, - MacAddress: string(macAddress), - } -} - -func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { - operation := "CreateEndpoint" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) - - endpoint.VirtualNetwork = network.Id - return endpoint.Create() -} - -func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { - operation := "CreateRemoteEndpoint" - title := "hcsshim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - endpoint.IsRemoteEndpoint = true - return network.CreateEndpoint(endpoint) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go deleted file mode 100644 index 84b36821..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ /dev/null @@ -1,110 +0,0 @@ -package hns - -// Type of Request Support in ModifySystem -type PolicyType string - -// RequestType const -const ( - Nat PolicyType = "NAT" - ACL PolicyType = "ACL" - PA PolicyType = "PA" - VLAN PolicyType = "VLAN" - VSID PolicyType = "VSID" - VNet PolicyType = "VNET" - L2Driver PolicyType = "L2Driver" - Isolation PolicyType = "Isolation" - QOS PolicyType = "QOS" - OutboundNat PolicyType = "OutBoundNAT" - ExternalLoadBalancer PolicyType = "ELB" - Route PolicyType = "ROUTE" - Proxy PolicyType = "PROXY" -) - -type NatPolicy struct { - Type PolicyType `json:"Type"` - Protocol string `json:",omitempty"` - InternalPort uint16 `json:",omitempty"` - ExternalPort uint16 `json:",omitempty"` - ExternalPortReserved bool `json:",omitempty"` -} - -type QosPolicy struct { - Type PolicyType `json:"Type"` - MaximumOutgoingBandwidthInBytes uint64 -} - -type IsolationPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint - VSID uint - InDefaultIsolation bool -} - -type VlanPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint -} - -type VsidPolicy struct { - Type PolicyType `json:"Type"` - VSID uint -} - -type PaPolicy struct { - Type PolicyType `json:"Type"` - PA string `json:"PA"` -} - -type OutboundNatPolicy struct { - Policy - VIP string `json:"VIP,omitempty"` - Exceptions []string `json:"ExceptionList,omitempty"` - 
Destinations []string `json:",omitempty"` -} - -type ProxyPolicy struct { - Type PolicyType `json:"Type"` - IP string `json:",omitempty"` - Port string `json:",omitempty"` - ExceptionList []string `json:",omitempty"` - Destination string `json:",omitempty"` - OutboundNat bool `json:",omitempty"` -} - -type ActionType string -type DirectionType string -type RuleType string - -const ( - Allow ActionType = "Allow" - Block ActionType = "Block" - - In DirectionType = "In" - Out DirectionType = "Out" - - Host RuleType = "Host" - Switch RuleType = "Switch" -) - -type ACLPolicy struct { - Type PolicyType `json:"Type"` - Id string `json:"Id,omitempty"` - Protocol uint16 `json:",omitempty"` - Protocols string `json:"Protocols,omitempty"` - InternalPort uint16 `json:",omitempty"` - Action ActionType - Direction DirectionType - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 `json:",omitempty"` - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 `json:",omitempty"` - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 `json:",omitempty"` - ServiceName string `json:",omitempty"` -} - -type Policy struct { - Type PolicyType `json:"Type"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go deleted file mode 100644 index 31322a68..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go +++ /dev/null @@ -1,201 +0,0 @@ -package hns - -import ( - "encoding/json" - - "github.com/sirupsen/logrus" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy struct { - Policy - DestinationPrefix string `json:"DestinationPrefix,omitempty"` - NextHop string `json:"NextHop,omitempty"` - EncapEnabled bool `json:"NeedEncap,omitempty"` -} - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy struct { - LBPolicy - SourceVIP string `json:"SourceVIP,omitempty"` - VIPs []string `json:"VIPs,omitempty"` - ILB bool `json:"ILB,omitempty"` - DSR bool `json:"IsDSR,omitempty"` -} - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy struct { - Policy - Protocol uint16 `json:"Protocol,omitempty"` - InternalPort uint16 - ExternalPort uint16 -} - -// PolicyList is a structure defining schema for Policy list request -type PolicyList struct { - ID string `json:"ID,omitempty"` - EndpointReferences []string `json:"References,omitempty"` - Policies []json.RawMessage `json:"Policies,omitempty"` -} - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - var policy PolicyList - err := hnsCall(method, "/policylists/"+path, request, &policy) - if err != nil { - return nil, err - } - - return &policy, nil -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - var plist []PolicyList - err := hnsCall("GET", "/policylists/", "", &plist) - if err != nil { - return nil, err - } - - return plist, nil -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) (*PolicyList, error) { - policylist := &PolicyList{} - err := hnsCall(method, "/policylists/"+path, request, &policylist) - if err != nil { - return nil, err - } - - return 
policylist, nil -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, error) { - return PolicyListRequest("GET", policyListID, "") -} - -// Create PolicyList by sending PolicyListRequest to HNS. -func (policylist *PolicyList) Create() (*PolicyList, error) { - operation := "Create" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - jsonString, err := json.Marshal(policylist) - if err != nil { - return nil, err - } - return PolicyListRequest("POST", "", string(jsonString)) -} - -// Delete deletes PolicyList -func (policylist *PolicyList) Delete() (*PolicyList, error) { - operation := "Delete" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - - return PolicyListRequest("DELETE", policylist.ID, "") -} - -// AddEndpoint add an endpoint to a Policy List -func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "AddEndpoint" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - - return policylist.Create() -} - -// RemoveEndpoint removes an endpoint from the Policy List -func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "RemoveEndpoint" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - elementToRemove := "/endpoints/" + endpoint.Id - - var references []string - - for _, endpointReference := range policylist.EndpointReferences { - if endpointReference == elementToRemove { - continue - } - references = append(references, endpointReference) - } - policylist.EndpointReferences = references - return policylist.Create() -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - operation := "AddLoadBalancer" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) - - policylist := &PolicyList{} - - elbPolicy := &ELBPolicy{ - SourceVIP: sourceVIP, - ILB: isILB, - } - - if len(vip) > 0 { - elbPolicy.VIPs = []string{vip} - } - elbPolicy.Type = ExternalLoadBalancer - elbPolicy.Protocol = protocol - elbPolicy.InternalPort = internalPort - elbPolicy.ExternalPort = externalPort - - for _, endpoint := range endpoints { - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - } - - jsonString, err := json.Marshal(elbPolicy) - if err != nil { - return nil, err - } - policylist.Policies = append(policylist.Policies, jsonString) - return policylist.Create() -} - -// AddRoute adds route policy list for the specified endpoints -func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { - operation := "AddRoute" - title := "hcsshim::PolicyList::" + operation - logrus.Debugf(title+" 
destinationPrefix:%s", destinationPrefix) - - policylist := &PolicyList{} - - rPolicy := &RoutePolicy{ - DestinationPrefix: destinationPrefix, - NextHop: nextHop, - EncapEnabled: encapEnabled, - } - rPolicy.Type = Route - - for _, endpoint := range endpoints { - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - } - - jsonString, err := json.Marshal(rPolicy) - if err != nil { - return nil, err - } - - policylist.Policies = append(policylist.Policies, jsonString) - return policylist.Create() -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go deleted file mode 100644 index d5efba7f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go +++ /dev/null @@ -1,49 +0,0 @@ -package hns - -import ( - "github.com/sirupsen/logrus" -) - -type HNSSupportedFeatures struct { - Acl HNSAclFeatures `json:"ACL"` -} - -type HNSAclFeatures struct { - AclAddressLists bool `json:"AclAddressLists"` - AclNoHostRulePriority bool `json:"AclHostRulePriority"` - AclPortRanges bool `json:"AclPortRanges"` - AclRuleId bool `json:"AclRuleId"` -} - -func GetHNSSupportedFeatures() HNSSupportedFeatures { - var hnsFeatures HNSSupportedFeatures - - globals, err := GetHNSGlobals() - if err != nil { - // Expected on pre-1803 builds, all features will be false/unsupported - logrus.Debugf("Unable to obtain HNS globals: %s", err) - return hnsFeatures - } - - hnsFeatures.Acl = HNSAclFeatures{ - AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), - AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803), - } - - return hnsFeatures -} - -func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { - if currentVersion.Major < minVersionSupported.Major { - return false - } - if currentVersion.Major > minVersionSupported.Major { - return true - } - if currentVersion.Minor < minVersionSupported.Minor { - return false - } - return true -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go deleted file mode 100644 index d3b04eef..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ /dev/null @@ -1,111 +0,0 @@ -package hns - -import ( - "encoding/json" - "fmt" - "os" - "path" - "strings" -) - -type namespaceRequest struct { - IsDefault bool `json:",omitempty"` -} - -type namespaceEndpointRequest struct { - ID string `json:"Id"` -} - -type NamespaceResource struct { - Type string - Data json.RawMessage -} - -type namespaceResourceRequest struct { - Type string - Data interface{} -} - -type Namespace struct { - ID string - IsDefault bool `json:",omitempty"` - ResourceList []NamespaceResource `json:",omitempty"` - CompartmentId uint32 `json:",omitempty"` -} - -func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { - var err error - hnspath := "/namespaces/" - if id != nil { - hnspath = path.Join(hnspath, *id) - } - if subpath != "" { - hnspath = path.Join(hnspath, subpath) - } - var reqJSON []byte - if request != nil { - if reqJSON, err = json.Marshal(request); err != nil { - return nil, err - } - } - var ns Namespace - err = hnsCall(method, hnspath, string(reqJSON), &ns) - if err != nil { - if 
strings.Contains(err.Error(), "Element not found.") { - return nil, os.ErrNotExist - } - return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) - } - return &ns, err -} - -func CreateNamespace() (string, error) { - req := namespaceRequest{} - ns, err := issueNamespaceRequest(nil, "POST", "", &req) - if err != nil { - return "", err - } - return ns.ID, nil -} - -func RemoveNamespace(id string) error { - _, err := issueNamespaceRequest(&id, "DELETE", "", nil) - return err -} - -func GetNamespaceEndpoints(id string) ([]string, error) { - ns, err := issueNamespaceRequest(&id, "GET", "", nil) - if err != nil { - return nil, err - } - var endpoints []string - for _, rsrc := range ns.ResourceList { - if rsrc.Type == "Endpoint" { - var endpoint namespaceEndpointRequest - err = json.Unmarshal(rsrc.Data, &endpoint) - if err != nil { - return nil, fmt.Errorf("unmarshal endpoint: %s", err) - } - endpoints = append(endpoints, endpoint.ID) - } - } - return endpoints, nil -} - -func AddNamespaceEndpoint(id string, endpointID string) error { - resource := namespaceResourceRequest{ - Type: "Endpoint", - Data: namespaceEndpointRequest{endpointID}, - } - _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) - return err -} - -func RemoveNamespaceEndpoint(id string, endpointID string) error { - resource := namespaceResourceRequest{ - Type: "Endpoint", - Data: namespaceEndpointRequest{endpointID}, - } - _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go deleted file mode 100644 index 204633a4..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package hns - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - - procHNSCall = modvmcompute.NewProc("HNSCall") -) - -func _hnsCall(method string, path string, object string, response **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(method) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(object) - if hr != nil { - return - } - return __hnsCall(_p0, _p1, _p2, response) -} - -func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go deleted file mode 100644 index 922f7c67..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go +++ /dev/null @@ -1,23 +0,0 @@ -package interop - -import ( - "syscall" - "unsafe" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go - -//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree - -func ConvertAndFreeCoTaskMemString(buffer *uint16) string { - str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) - coTaskMemFree(unsafe.Pointer(buffer)) - return str -} - -func Win32FromHresult(hr uintptr) syscall.Errno { - if hr&0x1fff0000 == 0x00070000 { - return syscall.Errno(hr & 0xffff) - } - return syscall.Errno(hr) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go deleted file mode 100644 index 12b0c71c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package interop - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") - - procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") -) - -func coTaskMemFree(buffer unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go deleted file mode 100644 index 5d6acd69..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go +++ /dev/null @@ -1,111 +0,0 @@ -package jobobject - -import ( - "context" - "fmt" - "sync" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/queue" - "github.com/Microsoft/hcsshim/internal/winapi" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var ( - ioInitOnce sync.Once - initIOErr error - // Global iocp handle that will be re-used for every job object - ioCompletionPort windows.Handle - // Mapping of job handle to queue to place notifications in. - jobMap sync.Map -) - -// MsgAllProcessesExited is a type representing a message that every process in a job has exited. -type MsgAllProcessesExited struct{} - -// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently. -// This should not be treated as an error. -type MsgUnimplemented struct{} - -// pollIOCP polls the io completion port forever. -func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { - var ( - overlapped uintptr - code uint32 - key uintptr - ) - - for { - err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE) - if err != nil { - log.G(ctx).WithError(err).Error("failed to poll for job object message") - continue - } - if val, ok := jobMap.Load(key); ok { - msq, ok := val.(*queue.MessageQueue) - if !ok { - log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map") - continue - } - notification, err := parseMessage(code, overlapped) - if err != nil { - log.G(ctx).WithFields(logrus.Fields{ - "code": code, - "overlapped": overlapped, - }).Warn("failed to parse job object message") - continue - } - if err := msq.Enqueue(notification); err == queue.ErrQueueClosed { - // Write will only return an error when the queue is closed. - // The only time a queue would ever be closed is when we call `Close` on - // the job it belongs to which also removes it from the jobMap, so something - // went wrong here. We can't return as this is reading messages for all jobs - // so just log it and move on. - log.G(ctx).WithFields(logrus.Fields{ - "code": code, - "overlapped": overlapped, - }).Warn("tried to write to a closed queue") - continue - } - } else { - log.G(ctx).Warn("received a message for a job not present in the mapping") - } - } -} - -func parseMessage(code uint32, overlapped uintptr) (interface{}, error) { - // Check code and parse out relevant information related to that notification - // that we care about. For now all we handle is the message that all processes - // in the job have exited. - switch code { - case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: - return MsgAllProcessesExited{}, nil - // Other messages for completeness and a check to make sure that if we fall - // into the default case that this is a code we don't know how to handle. 
- case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME: - case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME: - case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: - case winapi.JOB_OBJECT_MSG_NEW_PROCESS: - case winapi.JOB_OBJECT_MSG_EXIT_PROCESS: - case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: - case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: - case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: - case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT: - default: - return nil, fmt.Errorf("unknown job notification type: %d", code) - } - return MsgUnimplemented{}, nil -} - -// Assigns an IO completion port to get notified of events for the registered job -// object. -func attachIOCP(job windows.Handle, iocp windows.Handle) error { - info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{ - CompletionKey: job, - CompletionPort: iocp, - } - _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go deleted file mode 100644 index c9fdd921..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ /dev/null @@ -1,538 +0,0 @@ -package jobobject - -import ( - "context" - "errors" - "fmt" - "sync" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/queue" - "github.com/Microsoft/hcsshim/internal/winapi" - "golang.org/x/sys/windows" -) - -// This file provides higher level constructs for the win32 job object API. -// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" -// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information -// structs and associated limit flags. Whatever is not present from the job object API -// in golang.org/x/sys/windows is located in /internal/winapi. -// -// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects - -// JobObject is a high level wrapper around a Windows job object. Holds a handle to -// the job, a queue to receive iocp notifications about the lifecycle -// of the job and a mutex for synchronized handle access. -type JobObject struct { - handle windows.Handle - mq *queue.MessageQueue - handleLock sync.RWMutex -} - -// JobLimits represents the resource constraints that can be applied to a job object. -type JobLimits struct { - CPULimit uint32 - CPUWeight uint32 - MemoryLimitInBytes uint64 - MaxIOPS int64 - MaxBandwidth int64 -} - -type CPURateControlType uint32 - -const ( - WeightBased CPURateControlType = iota - RateBased -) - -// Processor resource controls -const ( - cpuLimitMin = 1 - cpuLimitMax = 10000 - cpuWeightMin = 1 - cpuWeightMax = 9 -) - -var ( - ErrAlreadyClosed = errors.New("the handle has already been closed") - ErrNotRegistered = errors.New("job is not registered to receive notifications") -) - -// Options represents the set of configurable options when making or opening a job object. -type Options struct { - // `Name` specifies the name of the job object if a named job object is desired. - Name string - // `Notifications` specifies if the job will be registered to receive notifications. - // Defaults to false. - Notifications bool - // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject. - // Defaults to false. - UseNTVariant bool - // `IOTracking` enables tracking I/O statistics on the job object. 
More specifically this - // calls SetInformationJobObject with the JobObjectIoAttribution class. - EnableIOTracking bool -} - -// Create creates a job object. -// -// If options.Name is an empty string, the job will not be assigned a name. -// -// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`. -// -// If `options` is nil, use default option values. -// -// Returns a JobObject structure and an error if there is one. -func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { - if options == nil { - options = &Options{} - } - - var jobName *winapi.UnicodeString - if options.Name != "" { - jobName, err = winapi.NewUnicodeString(options.Name) - if err != nil { - return nil, err - } - } - - var jobHandle windows.Handle - if options.UseNTVariant { - oa := winapi.ObjectAttributes{ - Length: unsafe.Sizeof(winapi.ObjectAttributes{}), - ObjectName: jobName, - Attributes: 0, - } - status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) - if status != 0 { - return nil, winapi.RtlNtStatusToDosError(status) - } - } else { - var jobNameBuf *uint16 - if jobName != nil && jobName.Buffer != nil { - jobNameBuf = jobName.Buffer - } - jobHandle, err = windows.CreateJobObject(nil, jobNameBuf) - if err != nil { - return nil, err - } - } - - defer func() { - if err != nil { - windows.Close(jobHandle) - } - }() - - job := &JobObject{ - handle: jobHandle, - } - - // If the IOCP we'll be using to receive messages for all jobs hasn't been - // created, create it and start polling. - if options.Notifications { - mq, err := setupNotifications(ctx, job) - if err != nil { - return nil, err - } - job.mq = mq - } - - if options.EnableIOTracking { - if err := enableIOTracking(jobHandle); err != nil { - return nil, err - } - } - - return job, nil -} - -// Open opens an existing job object with name provided in `options`. If no name is provided -// return an error since we need to know what job object to open. -// -// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`. -// -// Returns a JobObject structure and an error if there is one. -func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { - if options == nil || (options != nil && options.Name == "") { - return nil, errors.New("no job object name specified to open") - } - - unicodeJobName, err := winapi.NewUnicodeString(options.Name) - if err != nil { - return nil, err - } - - var jobHandle windows.Handle - if options != nil && options.UseNTVariant { - oa := winapi.ObjectAttributes{ - Length: unsafe.Sizeof(winapi.ObjectAttributes{}), - ObjectName: unicodeJobName, - Attributes: 0, - } - status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) - if status != 0 { - return nil, winapi.RtlNtStatusToDosError(status) - } - } else { - jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer) - if err != nil { - return nil, err - } - } - - defer func() { - if err != nil { - windows.Close(jobHandle) - } - }() - - job := &JobObject{ - handle: jobHandle, - } - - // If the IOCP we'll be using to receive messages for all jobs hasn't been - // created, create it and start polling. 
- if options != nil && options.Notifications { - mq, err := setupNotifications(ctx, job) - if err != nil { - return nil, err - } - job.mq = mq - } - - return job, nil -} - -// helper function to setup notifications for creating/opening a job object -func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - ioInitOnce.Do(func() { - h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - initIOErr = err - return - } - ioCompletionPort = h - go pollIOCP(ctx, h) - }) - - if initIOErr != nil { - return nil, initIOErr - } - - mq := queue.NewMessageQueue() - jobMap.Store(uintptr(job.handle), mq) - if err := attachIOCP(job.handle, ioCompletionPort); err != nil { - jobMap.Delete(uintptr(job.handle)) - return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err) - } - return mq, nil -} - -// PollNotification will poll for a job object notification. This call should only be called once -// per job (ideally in a goroutine loop) and will block if there is not a notification ready. -// This call will return immediately with error `ErrNotRegistered` if the job was not registered -// to receive notifications during `Create`. Internally, messages will be queued and there -// is no worry of messages being dropped. -func (job *JobObject) PollNotification() (interface{}, error) { - if job.mq == nil { - return nil, ErrNotRegistered - } - return job.mq.Dequeue() -} - -// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to -// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process -// has already started running. -func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if err := attrList.Update( - winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST, - unsafe.Pointer(&job.handle), - unsafe.Sizeof(job.handle), - ); err != nil { - return fmt.Errorf("failed to update proc thread attributes for job object: %w", err) - } - - return nil -} - -// Close closes the job object handle. -func (job *JobObject) Close() error { - job.handleLock.Lock() - defer job.handleLock.Unlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if err := windows.Close(job.handle); err != nil { - return err - } - - if job.mq != nil { - job.mq.Close() - } - // Handles now invalid so if the map entry to receive notifications for this job still - // exists remove it so we can stop receiving notifications. - if _, ok := jobMap.Load(uintptr(job.handle)); ok { - jobMap.Delete(uintptr(job.handle)) - } - - job.handle = 0 - return nil -} - -// Assign assigns a process to the job object. -func (job *JobObject) Assign(pid uint32) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if pid == 0 { - return errors.New("invalid pid: 0") - } - hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid) - if err != nil { - return err - } - defer windows.Close(hProc) - return windows.AssignProcessToJobObject(job.handle, hProc) -} - -// Terminate terminates the job, essentially calls TerminateProcess on every process in the -// job. 
-func (job *JobObject) Terminate(exitCode uint32) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - if job.handle == 0 { - return ErrAlreadyClosed - } - return windows.TerminateJobObject(job.handle, exitCode) -} - -// Pids returns all of the process IDs in the job object. -func (job *JobObject) Pids() ([]uint32, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{} - err := winapi.QueryInformationJobObject( - job.handle, - winapi.JobObjectBasicProcessIdList, - unsafe.Pointer(&info), - uint32(unsafe.Sizeof(info)), - nil, - ) - - // This is either the case where there is only one process or no processes in - // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList - // is 1 and just return this, otherwise return an empty slice. - if err == nil { - if info.NumberOfProcessIdsInList == 1 { - return []uint32{uint32(info.ProcessIdList[0])}, nil - } - // Return empty slice instead of nil to play well with the caller of this. - // Do not return an error if no processes are running inside the job - return []uint32{}, nil - } - - if err != winapi.ERROR_MORE_DATA { - return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) - } - - jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1)) - buf := make([]byte, jobBasicProcessIDListSize) - if err = winapi.QueryInformationJobObject( - job.handle, - winapi.JobObjectBasicProcessIdList, - unsafe.Pointer(&buf[0]), - uint32(len(buf)), - nil, - ); err != nil { - return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err) - } - - bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0])) - pids := make([]uint32, bufInfo.NumberOfProcessIdsInList) - for i, bufPid := range bufInfo.AllPids() { - pids[i] = uint32(bufPid) - } - return pids, nil -} - -// QueryMemoryStats gets the memory stats for the job object. -func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{} - if err := winapi.QueryInformationJobObject( - job.handle, - winapi.JobObjectMemoryUsageInformation, - unsafe.Pointer(&info), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("failed to query for job object memory stats: %w", err) - } - return &info, nil -} - -// QueryProcessorStats gets the processor stats for the job object. -func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{} - if err := winapi.QueryInformationJobObject( - job.handle, - winapi.JobObjectBasicAccountingInformation, - unsafe.Pointer(&info), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("failed to query for job object process stats: %w", err) - } - return &info, nil -} - -// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error -// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking() -// hasn't been called since creation of the job. 
-func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ - ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, - } - if err := winapi.QueryInformationJobObject( - job.handle, - winapi.JobObjectIoAttribution, - unsafe.Pointer(&info), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("failed to query for job object storage stats: %w", err) - } - return &info, nil -} - -// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the -// private working set for every process running in the job. -func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { - pids, err := job.Pids() - if err != nil { - return 0, err - } - - openAndQueryWorkingSet := func(pid uint32) (uint64, error) { - h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid) - if err != nil { - // Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a - // case where one of the pids in the job exited before we open. - return 0, nil - } - defer func() { - _ = windows.Close(h) - }() - // Check if the process is actually running in the job still. There's a small chance - // that the process could have exited and had its pid re-used between grabbing the pids - // in the job and opening the handle to it above. - var inJob int32 - if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil { - // This shouldn't fail unless we have incorrect access rights which we control - // here so probably best to error out if this failed. - return 0, err - } - // Don't report stats for this process as it's not running in the job. This shouldn't be - // an error condition though. - if inJob == 0 { - return 0, nil - } - - var vmCounters winapi.VM_COUNTERS_EX2 - status := winapi.NtQueryInformationProcess( - h, - winapi.ProcessVmCounters, - unsafe.Pointer(&vmCounters), - uint32(unsafe.Sizeof(vmCounters)), - nil, - ) - if !winapi.NTSuccess(status) { - return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status)) - } - return uint64(vmCounters.PrivateWorkingSetSize), nil - } - - var jobWorkingSetSize uint64 - for _, pid := range pids { - workingSet, err := openAndQueryWorkingSet(pid) - if err != nil { - return 0, err - } - jobWorkingSetSize += workingSet - } - - return jobWorkingSetSize, nil -} - -// SetIOTracking enables IO tracking for processes in the job object. -// This enables use of the QueryStorageStats method. 
-func (job *JobObject) SetIOTracking() error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - return enableIOTracking(job.handle) -} - -func enableIOTracking(job windows.Handle) error { - info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ - ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, - } - if _, err := windows.SetInformationJobObject( - job, - winapi.JobObjectIoAttribution, - uintptr(unsafe.Pointer(&info)), - uint32(unsafe.Sizeof(info)), - ); err != nil { - return fmt.Errorf("failed to enable IO tracking on job object: %w", err) - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go deleted file mode 100644 index 4efde292..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ /dev/null @@ -1,315 +0,0 @@ -package jobobject - -import ( - "errors" - "fmt" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/winapi" - "golang.org/x/sys/windows" -) - -const ( - memoryLimitMax uint64 = 0xffffffffffffffff -) - -func isFlagSet(flag, controlFlags uint32) bool { - return (flag & controlFlags) == flag -} - -// SetResourceLimits sets resource limits on the job object (cpu, memory, storage). -func (job *JobObject) SetResourceLimits(limits *JobLimits) error { - // Go through and check what limits were specified and apply them to the job. - if limits.MemoryLimitInBytes != 0 { - if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil { - return fmt.Errorf("failed to set job object memory limit: %w", err) - } - } - - if limits.CPULimit != 0 { - if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil { - return fmt.Errorf("failed to set job object cpu limit: %w", err) - } - } else if limits.CPUWeight != 0 { - if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil { - return fmt.Errorf("failed to set job object cpu limit: %w", err) - } - } - - if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 { - if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil { - return fmt.Errorf("failed to set io limit on job object: %w", err) - } - } - return nil -} - -// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate -// all processes in the job on the last open handle being closed. -func (job *JobObject) SetTerminateOnLastHandleClose() error { - info, err := job.getExtendedInformation() - if err != nil { - return err - } - info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE - return job.setExtendedInformation(info) -} - -// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`. -func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error { - if memoryLimitInBytes >= memoryLimitMax { - return errors.New("memory limit specified exceeds the max size") - } - - info, err := job.getExtendedInformation() - if err != nil { - return err - } - - info.JobMemoryLimit = uintptr(memoryLimitInBytes) - info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY - return job.setExtendedInformation(info) -} - -// GetMemoryLimit gets the memory limit in bytes of the job object. 
-func (job *JobObject) GetMemoryLimit() (uint64, error) { - info, err := job.getExtendedInformation() - if err != nil { - return 0, err - } - return uint64(info.JobMemoryLimit), nil -} - -// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to -// `rateControlValue` for the job object. -func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error { - cpuInfo, err := job.getCPURateControlInformation() - if err != nil { - return err - } - switch rateControlType { - case WeightBased: - if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax { - return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue) - } - cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED - cpuInfo.Value = rateControlValue - case RateBased: - if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax { - return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue) - } - cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP - cpuInfo.Value = rateControlValue - default: - return errors.New("invalid job object cpu rate control type") - } - return job.setCPURateControlInfo(cpuInfo) -} - -// GetCPULimit gets the cpu limits for the job object. -// `rateControlType` is used to indicate what type of cpu limit to query for. -func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) { - info, err := job.getCPURateControlInformation() - if err != nil { - return 0, err - } - - if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) { - return 0, errors.New("the job does not have cpu rate control enabled") - } - - switch rateControlType { - case WeightBased: - if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) { - return 0, errors.New("cannot get cpu weight for job object without cpu weight option set") - } - case RateBased: - if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) { - return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set") - } - default: - return 0, errors.New("invalid job object cpu rate control type") - } - return info.Value, nil -} - -// SetCPUAffinity sets the processor affinity for the job object. -// The affinity is passed in as a bitmask. -func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { - info, err := job.getExtendedInformation() - if err != nil { - return err - } - info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) - info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) - return job.setExtendedInformation(info) -} - -// GetCPUAffinity gets the processor affinity for the job object. -// The returned affinity is a bitmask. -func (job *JobObject) GetCPUAffinity() (uint64, error) { - info, err := job.getExtendedInformation() - if err != nil { - return 0, err - } - return uint64(info.BasicLimitInformation.Affinity), nil -} - -// SetIOLimit sets the IO limits specified on the job object. 
-func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { - ioInfo, err := job.getIOLimit() - if err != nil { - return err - } - ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE - if maxBandwidth != 0 { - ioInfo.MaxBandwidth = maxBandwidth - } - if maxIOPS != 0 { - ioInfo.MaxIops = maxIOPS - } - return job.setIORateControlInfo(ioInfo) -} - -// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. -func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { - info, err := job.getIOLimit() - if err != nil { - return 0, err - } - return info.MaxBandwidth, nil -} - -// GetIOMaxIopsLimit gets the max iops for the job object. -func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { - info, err := job.getIOLimit() - if err != nil { - return 0, err - } - return info.MaxIops, nil -} - -// Helper function for getting a job object's extended information. -func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} - if err := winapi.QueryInformationJobObject( - job.handle, - windows.JobObjectExtendedLimitInformation, - unsafe.Pointer(&info), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("query %v returned error: %w", info, err) - } - return &info, nil -} - -// Helper function for getting a job object's CPU rate control information. -func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} - if err := winapi.QueryInformationJobObject( - job.handle, - windows.JobObjectCpuRateControlInformation, - unsafe.Pointer(&info), - uint32(unsafe.Sizeof(info)), - nil, - ); err != nil { - return nil, fmt.Errorf("query %v returned error: %w", info, err) - } - return &info, nil -} - -// Helper function for setting a job object's extended information. -func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if _, err := windows.SetInformationJobObject( - job.handle, - windows.JobObjectExtendedLimitInformation, - uintptr(unsafe.Pointer(info)), - uint32(unsafe.Sizeof(*info)), - ); err != nil { - return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) - } - return nil -} - -// Helper function for querying job handle for IO limit information. 
-func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return nil, ErrAlreadyClosed - } - - ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{} - var blockCount uint32 = 1 - - if _, err := winapi.QueryIoRateControlInformationJobObject( - job.handle, - nil, - &ioInfo, - &blockCount, - ); err != nil { - return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err) - } - - if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) { - return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo) - } - return ioInfo, nil -} - -// Helper function for setting a job object's IO rate control information. -func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - - if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil { - return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err) - } - return nil -} - -// Helper function for setting a job object's CPU rate control information. -func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error { - job.handleLock.RLock() - defer job.handleLock.RUnlock() - - if job.handle == 0 { - return ErrAlreadyClosed - } - if _, err := windows.SetInformationJobObject( - job.handle, - windows.JobObjectCpuRateControlInformation, - uintptr(unsafe.Pointer(cpuInfo)), - uint32(unsafe.Sizeof(cpuInfo)), - ); err != nil { - return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err) - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go deleted file mode 100644 index ba6b1a4a..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go +++ /dev/null @@ -1,23 +0,0 @@ -package log - -import ( - "context" - - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` -// contains an OpenCensus `trace.Span`. -func G(ctx context.Context) *logrus.Entry { - span := trace.FromContext(ctx) - if span != nil { - sctx := span.SpanContext() - return logrus.WithFields(logrus.Fields{ - "traceID": sctx.TraceID.String(), - "spanID": sctx.SpanID.String(), - // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? - }) - } - return logrus.NewEntry(logrus.StandardLogger()) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go deleted file mode 100644 index cf2c166d..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ /dev/null @@ -1,32 +0,0 @@ -package logfields - -const ( - // Identifiers - - ContainerID = "cid" - UVMID = "uvm-id" - ProcessID = "pid" - - // Common Misc - - // Timeout represents an operation timeout. 
- Timeout = "timeout" - JSON = "json" - - // Keys/values - - Field = "field" - OCIAnnotation = "oci-annotation" - Value = "value" - - // Golang type's - - ExpectedType = "expected-type" - Bool = "bool" - Uint32 = "uint32" - Uint64 = "uint64" - - // runhcs - - VMShimOperation = "vmshim-op" -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go deleted file mode 100644 index e5b8b85e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go +++ /dev/null @@ -1,24 +0,0 @@ -package longpath - -import ( - "path/filepath" - "strings" -) - -// LongAbs makes a path absolute and returns it in NT long path form. -func LongAbs(path string) (string, error) { - if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { - return path, nil - } - if !filepath.IsAbs(path) { - absPath, err := filepath.Abs(path) - if err != nil { - return "", err - } - path = absPath - } - if strings.HasPrefix(path, `\\`) { - return `\\?\UNC\` + path[2:], nil - } - return `\\?\` + path, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go deleted file mode 100644 index 7e95efb3..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go +++ /dev/null @@ -1,52 +0,0 @@ -package mergemaps - -import "encoding/json" - -// Merge recursively merges map `fromMap` into map `ToMap`. Any pre-existing values -// in ToMap are overwritten. Values in fromMap are added to ToMap. -// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang -func Merge(fromMap, ToMap interface{}) interface{} { - switch fromMap := fromMap.(type) { - case map[string]interface{}: - ToMap, ok := ToMap.(map[string]interface{}) - if !ok { - return fromMap - } - for keyToMap, valueToMap := range ToMap { - if valueFromMap, ok := fromMap[keyToMap]; ok { - fromMap[keyToMap] = Merge(valueFromMap, valueToMap) - } else { - fromMap[keyToMap] = valueToMap - } - } - case nil: - // merge(nil, map[string]interface{...}) -> map[string]interface{...} - ToMap, ok := ToMap.(map[string]interface{}) - if ok { - return ToMap - } - } - return fromMap -} - -// MergeJSON merges the contents of a JSON string into an object representation, -// returning a new object suitable for translating to JSON. -func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) { - if len(additionalJSON) == 0 { - return object, nil - } - objectJSON, err := json.Marshal(object) - if err != nil { - return nil, err - } - var objectMap, newMap map[string]interface{} - err = json.Unmarshal(objectJSON, &objectMap) - if err != nil { - return nil, err - } - err = json.Unmarshal(additionalJSON, &newMap) - if err != nil { - return nil, err - } - return Merge(newMap, objectMap), nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go deleted file mode 100644 index f428bdaf..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go +++ /dev/null @@ -1,43 +0,0 @@ -package oc - -import ( - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -var _ = (trace.Exporter)(&LogrusExporter{}) - -// LogrusExporter is an OpenCensus `trace.Exporter` that exports -// `trace.SpanData` to logrus output. -type LogrusExporter struct { -} - -// ExportSpan exports `s` based on the the following rules: -// -// 1. 
All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, -// `s.ParentSpanID` for correlation -// -// 2. Any calls to .Annotate will not be supported. -// -// 3. The span itself will be written at `logrus.InfoLevel` unless -// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` -// providing `s.Status.Message` as the error value. -func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { - // Combine all span annotations with traceID, spanID, parentSpanID - baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) - baseEntry.Data["traceID"] = s.TraceID.String() - baseEntry.Data["spanID"] = s.SpanID.String() - baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() - baseEntry.Data["startTime"] = s.StartTime - baseEntry.Data["endTime"] = s.EndTime - baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() - baseEntry.Data["name"] = s.Name - baseEntry.Time = s.StartTime - - level := logrus.InfoLevel - if s.Status.Code != 0 { - level = logrus.ErrorLevel - baseEntry.Data[logrus.ErrorKey] = s.Status.Message - } - baseEntry.Log(level, "Span") -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go deleted file mode 100644 index fee4765c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ /dev/null @@ -1,17 +0,0 @@ -package oc - -import ( - "go.opencensus.io/trace" -) - -// SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If -// `err` is `nil` assumes `trace.StatusCodeOk`. -func SetSpanStatus(span *trace.Span, err error) { - status := trace.Status{} - if err != nil { - // TODO: JTERRY75 - Handle errors in a non-generic way - status.Code = trace.StatusCodeUnknown - status.Message = err.Error() - } - span.SetStatus(status) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go deleted file mode 100644 index 4eb9bb9f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go +++ /dev/null @@ -1,92 +0,0 @@ -package queue - -import ( - "errors" - "sync" -) - -var ErrQueueClosed = errors.New("the queue is closed for reading and writing") - -// MessageQueue represents a threadsafe message queue to be used to retrieve or -// write messages to. -type MessageQueue struct { - m *sync.RWMutex - c *sync.Cond - messages []interface{} - closed bool -} - -// NewMessageQueue returns a new MessageQueue. -func NewMessageQueue() *MessageQueue { - m := &sync.RWMutex{} - return &MessageQueue{ - m: m, - c: sync.NewCond(m), - messages: []interface{}{}, - } -} - -// Enqueue writes `msg` to the queue. -func (mq *MessageQueue) Enqueue(msg interface{}) error { - mq.m.Lock() - defer mq.m.Unlock() - - if mq.closed { - return ErrQueueClosed - } - mq.messages = append(mq.messages, msg) - // Signal a waiter that there is now a value available in the queue. - mq.c.Signal() - return nil -} - -// Dequeue will read a value from the queue and remove it. If the queue -// is empty, this will block until the queue is closed or a value gets enqueued. -func (mq *MessageQueue) Dequeue() (interface{}, error) { - mq.m.Lock() - defer mq.m.Unlock() - - for !mq.closed && mq.size() == 0 { - mq.c.Wait() - } - - // We got woken up, check if it's because the queue got closed. - if mq.closed { - return nil, ErrQueueClosed - } - - val := mq.messages[0] - mq.messages[0] = nil - mq.messages = mq.messages[1:] - return val, nil -} - -// Size returns the size of the queue. 
-func (mq *MessageQueue) Size() int { - mq.m.RLock() - defer mq.m.RUnlock() - return mq.size() -} - -// Nonexported size check to check if the queue is empty inside already locked functions. -func (mq *MessageQueue) size() int { - return len(mq.messages) -} - -// Close closes the queue for future writes or reads. Any attempts to read or write from the -// queue after close will return ErrQueueClosed. This is safe to call multiple times. -func (mq *MessageQueue) Close() { - mq.m.Lock() - defer mq.m.Unlock() - - // Already closed, noop - if mq.closed { - return - } - - mq.messages = nil - mq.closed = true - // If there's anybody currently waiting on a value from Dequeue, we need to - // broadcast so the read(s) can return ErrQueueClosed. - mq.c.Broadcast() -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go deleted file mode 100644 index 66b8d7e0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ /dev/null @@ -1,375 +0,0 @@ -package safefile - -import ( - "errors" - "io" - "os" - "path/filepath" - "strings" - "syscall" - "unicode/utf16" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/longpath" - "github.com/Microsoft/hcsshim/internal/winapi" - - winio "github.com/Microsoft/go-winio" -) - -func OpenRoot(path string) (*os.File, error) { - longpath, err := longpath.LongAbs(path) - if err != nil { - return nil, err - } - return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) -} - -func cleanGoStringRelativePath(path string) (string, error) { - path = filepath.Clean(path) - if strings.Contains(path, ":") { - // Since alternate data streams must follow the file they - // are attached to, finding one here (out of order) is invalid. - return "", errors.New("path contains invalid character `:`") - } - fspath := filepath.FromSlash(path) - if len(fspath) > 0 && fspath[0] == '\\' { - return "", errors.New("expected relative path") - } - return fspath, nil -} - -func ntRelativePath(path string) ([]uint16, error) { - fspath, err := cleanGoStringRelativePath(path) - if err != nil { - return nil, err - } - - path16 := utf16.Encode(([]rune)(fspath)) - if len(path16) > 32767 { - return nil, syscall.ENAMETOOLONG - } - - return path16, nil -} - -// openRelativeInternal opens a relative path from the given root, failing if -// any of the intermediate path components are reparse points. 
-func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { - var ( - h uintptr - iosb winapi.IOStatusBlock - oa winapi.ObjectAttributes - ) - - cleanRelativePath, err := cleanGoStringRelativePath(path) - if err != nil { - return nil, err - } - - if root == nil || root.Fd() == 0 { - return nil, errors.New("missing root directory") - } - - pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) - if err != nil { - return nil, err - } - - oa.Length = unsafe.Sizeof(oa) - oa.ObjectName = pathUnicode - oa.RootDirectory = uintptr(root.Fd()) - oa.Attributes = winapi.OBJ_DONT_REPARSE - status := winapi.NtCreateFile( - &h, - accessMask|syscall.SYNCHRONIZE, - &oa, - &iosb, - nil, - 0, - shareFlags, - createDisposition, - winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, - nil, - 0, - ) - if status != 0 { - return nil, winapi.RtlNtStatusToDosError(status) - } - - fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) - if err != nil { - syscall.Close(syscall.Handle(h)) - return nil, err - } - - return os.NewFile(h, fullPath), nil -} - -// OpenRelative opens a relative path from the given root, failing if -// any of the intermediate path components are reparse points. -func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { - f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) - if err != nil { - err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} - } - return f, err -} - -// LinkRelative creates a hard link from oldname to newname (relative to oldroot -// and newroot), failing if any of the intermediate path components are reparse -// points. -func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { - // Open the old file. - oldf, err := openRelativeInternal( - oldname, - oldroot, - syscall.FILE_WRITE_ATTRIBUTES, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - 0, - ) - if err != nil { - return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} - } - defer oldf.Close() - - // Open the parent of the new file. - var parent *os.File - parentPath := filepath.Dir(newname) - if parentPath != "." { - parent, err = openRelativeInternal( - parentPath, - newroot, - syscall.GENERIC_READ, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_DIRECTORY_FILE) - if err != nil { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} - } - defer parent.Close() - - fi, err := winio.GetFileBasicInfo(parent) - if err != nil { - return err - } - if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} - } - - } else { - parent = newroot - } - - // Issue an NT call to create the link. This will be safe because NT will - // not open any more directories to create the link, so it cannot walk any - // more reparse points. 
- newbase := filepath.Base(newname) - newbase16, err := ntRelativePath(newbase) - if err != nil { - return err - } - - size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 - linkinfoBuffer := winapi.LocalAlloc(0, size) - defer winapi.LocalFree(linkinfoBuffer) - - linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) - linkinfo.RootDirectory = parent.Fd() - linkinfo.FileNameLength = uint32(len(newbase16) * 2) - copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) - - var iosb winapi.IOStatusBlock - status := winapi.NtSetInformationFile( - oldf.Fd(), - &iosb, - linkinfoBuffer, - uint32(size), - winapi.FileLinkInformationClass, - ) - if status != 0 { - return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} - } - - return nil -} - -// deleteOnClose marks a file to be deleted when the handle is closed. -func deleteOnClose(f *os.File) error { - disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} - var iosb winapi.IOStatusBlock - status := winapi.NtSetInformationFile( - f.Fd(), - &iosb, - uintptr(unsafe.Pointer(&disposition)), - uint32(unsafe.Sizeof(disposition)), - winapi.FileDispositionInformationExClass, - ) - if status != 0 { - return winapi.RtlNtStatusToDosError(status) - } - return nil -} - -// clearReadOnly clears the readonly attribute on a file. -func clearReadOnly(f *os.File) error { - bi, err := winio.GetFileBasicInfo(f) - if err != nil { - return err - } - if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { - return nil - } - sbi := winio.FileBasicInfo{ - FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, - } - if sbi.FileAttributes == 0 { - sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL - } - return winio.SetFileBasicInfo(f, &sbi) -} - -// RemoveRelative removes a file or directory relative to a root, failing if any -// intermediate path components are reparse points. -func RemoveRelative(path string, root *os.File) error { - f, err := openRelativeInternal( - path, - root, - winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err == nil { - defer f.Close() - err = deleteOnClose(f) - if err == syscall.ERROR_ACCESS_DENIED { - // Maybe the file is marked readonly. Clear the bit and retry. - _ = clearReadOnly(f) - err = deleteOnClose(f) - } - } - if err != nil { - return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} - } - return nil -} - -// RemoveAllRelative removes a directory tree relative to a root, failing if any -// intermediate path components are reparse points. -func RemoveAllRelative(path string, root *os.File) error { - fi, err := LstatRelative(path, root) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes - if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { - // If this is a reparse point, it can't have children. Simple remove will do. - err := RemoveRelative(path, root) - if err == nil || os.IsNotExist(err) { - return nil - } - return err - } - - // It is necessary to use os.Open as Readdirnames does not work with - // OpenRelative. 
This is safe because the above lstatrelative fails - // if the target is outside the root, and we know this is not a - // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. - fd, err := os.Open(filepath.Join(root.Name(), path)) - if err != nil { - if os.IsNotExist(err) { - // Race. It was deleted between the Lstat and Open. - // Return nil per RemoveAll's docs. - return nil - } - return err - } - - // Remove contents & return first error. - for { - names, err1 := fd.Readdirnames(100) - for _, name := range names { - err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) - if err == nil { - err = err1 - } - } - if err1 == io.EOF { - break - } - // If Readdirnames returned an error, use it. - if err == nil { - err = err1 - } - if len(names) == 0 { - break - } - } - fd.Close() - - // Remove directory. - err1 := RemoveRelative(path, root) - if err1 == nil || os.IsNotExist(err1) { - return nil - } - if err == nil { - err = err1 - } - return err -} - -// MkdirRelative creates a directory relative to a root, failing if any -// intermediate path components are reparse points. -func MkdirRelative(path string, root *os.File) error { - f, err := openRelativeInternal( - path, - root, - 0, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_CREATE, - winapi.FILE_DIRECTORY_FILE) - if err == nil { - f.Close() - } else { - err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} - } - return err -} - -// LstatRelative performs a stat operation on a file relative to a root, failing -// if any intermediate path components are reparse points. -func LstatRelative(path string, root *os.File) (os.FileInfo, error) { - f, err := openRelativeInternal( - path, - root, - winapi.FILE_READ_ATTRIBUTES, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err != nil { - return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} - } - defer f.Close() - return f.Stat() -} - -// EnsureNotReparsePointRelative validates that a given file (relative to a -// root) and all intermediate path components are not a reparse points. -func EnsureNotReparsePointRelative(path string, root *os.File) error { - // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. - f, err := OpenRelative( - path, - root, - 0, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - winapi.FILE_OPEN, - 0) - if err != nil { - return err - } - f.Close() - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go deleted file mode 100644 index eaf39fa5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go +++ /dev/null @@ -1,74 +0,0 @@ -package timeout - -import ( - "os" - "strconv" - "time" -) - -var ( - // defaultTimeout is the timeout for most operations that is not overridden. - defaultTimeout = 4 * time.Minute - - // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond - // for a disk to come online in LCOW. - defaultTimeoutTestdRetry = 5 * time.Second -) - -// External variables for HCSShim consumers to use. 
-var ( - // SystemCreate is the timeout for creating a compute system - SystemCreate time.Duration = defaultTimeout - - // SystemStart is the timeout for starting a compute system - SystemStart time.Duration = defaultTimeout - - // SystemPause is the timeout for pausing a compute system - SystemPause time.Duration = defaultTimeout - - // SystemResume is the timeout for resuming a compute system - SystemResume time.Duration = defaultTimeout - - // SystemSave is the timeout for saving a compute system - SystemSave time.Duration = defaultTimeout - - // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. - SyscallWatcher time.Duration = defaultTimeout - - // Tar2VHD is the timeout for the tar2vhd operation to complete - Tar2VHD time.Duration = defaultTimeout - - // ExternalCommandToStart is the timeout for external commands to start - ExternalCommandToStart = defaultTimeout - - // ExternalCommandToComplete is the timeout for external commands to complete. - // Generally this means copying data from their stdio pipes. - ExternalCommandToComplete = defaultTimeout - - // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW - TestDRetryLoop = defaultTimeoutTestdRetry -) - -func init() { - SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) - SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) - SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) - SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) - SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) - SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) - Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) - ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) - ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) - TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) -} - -func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { - envTimeout := os.Getenv(env) - if len(envTimeout) > 0 { - e, err := strconv.Atoi(envTimeout) - if err == nil && e > 0 { - return time.Second * time.Duration(e) - } - } - return defaultValue -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go deleted file mode 100644 index e7f114b6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ /dev/null @@ -1,610 +0,0 @@ -package vmcompute - -import ( - gcontext "context" - "syscall" - "time" - - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/logfields" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/timeout" - "go.opencensus.io/trace" -) - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go - -//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? -//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? 
-//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? -//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? -//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? -//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? -//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? -//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? -//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? -//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? -//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? -//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? -//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? -//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? -//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? - -//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? -//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? -//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? -//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? -//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? -//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? -//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? -//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? -//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? -//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? - -// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously -const errVmcomputeOperationPending = syscall.Errno(0xC0370103) - -// HcsSystem is the handle associated with a created compute system. 
-type HcsSystem syscall.Handle - -// HcsProcess is the handle associated with a created process in a compute -// system. -type HcsProcess syscall.Handle - -// HcsCallback is the handle associated with the function to call when events -// occur. -type HcsCallback syscall.Handle - -// HcsProcessInformation is the structure used when creating or getting process -// info. -type HcsProcessInformation struct { - // ProcessId is the pid of the created process. - ProcessId uint32 - reserved uint32 //nolint:structcheck - // StdInput is the handle associated with the stdin of the process. - StdInput syscall.Handle - // StdOutput is the handle associated with the stdout of the process. - StdOutput syscall.Handle - // StdError is the handle associated with the stderr of the process. - StdError syscall.Handle -} - -func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { - if timeout > 0 { - var cancel gcontext.CancelFunc - ctx, cancel = gcontext.WithTimeout(ctx, timeout) - defer cancel() - } - - done := make(chan error, 1) - go func() { - done <- f() - }() - select { - case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { - log.G(ctx).WithField(logfields.Timeout, timeout). - Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") - } - return ctx.Err() - case err := <-done: - return err - } -} - -func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("query", query)) - - return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - computeSystemsp *uint16 - resultp *uint16 - ) - err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) - if computeSystemsp != nil { - computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes( - trace.StringAttribute("id", id), - trace.StringAttribute("configuration", configuration)) - - return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { - var resultp *uint16 - err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return 
computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsOpenComputeSystem(id, &computeSystem, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsCloseComputeSystem(computeSystem) - }) -} - -func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemStart, func() error { - var resultp *uint16 - err := hcsStartComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsShutdownComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsTerminateComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemPause, func() error { - var resultp *uint16 - err := hcsPauseComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) 
(result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SystemResume, func() error { - var resultp *uint16 - err := hcsResumeComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetComputeSystemProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) - - return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - propertiesp *uint16 - resultp *uint16 - ) - err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) - if propertiesp != nil { - properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("configuration", configuration)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("settings", settings)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyServiceSettings(settings, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { - return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) - }) -} - -func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) 
}() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsUnregisterComputeSystemCallback(callbackHandle) - }) -} - -func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) - - return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsOpenProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) - - return process, result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsOpenProcess(computeSystem, pid, &process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsCloseProcess(process) - }) -} - -func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsTerminateProcess(process, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsSignalProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("options", options)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsSignalProcess(process, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return processInformation, result, execute(ctx, 
timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsGetProcessInfo(process, &processInformation, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - - return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - processPropertiesp *uint16 - resultp *uint16 - ) - err := hcsGetProcessProperties(process, &processPropertiesp, &resultp) - if processPropertiesp != nil { - processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("settings", settings)) - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsModifyProcess(process, settings, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") - defer span.End() - defer func() { - if result != "" { - span.AddAttributes(trace.StringAttribute("result", result)) - } - oc.SetSpanStatus(span, hr) - }() - span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) - - return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { - var ( - propertiesp *uint16 - resultp *uint16 - ) - err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) - if propertiesp != nil { - properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) - } - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} - -func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { - return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) - }) -} - -func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback") - defer span.End() - defer func() { oc.SetSpanStatus(span, hr) }() - - return execute(ctx, timeout.SyscallWatcher, func() error { - return hcsUnregisterProcessCallback(callbackHandle) - }) -} - -func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") - defer span.End() - defer func() { - if result != "" { - 
span.AddAttributes(trace.StringAttribute("result", result)) - } - if hr != errVmcomputeOperationPending { - oc.SetSpanStatus(span, hr) - } - }() - - return result, execute(ctx, timeout.SyscallWatcher, func() error { - var resultp *uint16 - err := hcsSaveComputeSystem(computeSystem, options, &resultp) - if resultp != nil { - result = interop.ConvertAndFreeCoTaskMemString(resultp) - } - return err - }) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go deleted file mode 100644 index cae55058..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go +++ /dev/null @@ -1,581 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package vmcompute - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - - procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") - procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") - procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") - procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") - procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") - procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") - procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") - procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") - procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") - procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") - procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") - procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") - procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") - procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") - procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") - procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") - procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") - procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") - procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") - procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") - procHcsUnregisterProcessCallback = 
modvmcompute.NewProc("HcsUnregisterProcessCallback") -) - -func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcsEnumerateComputeSystems(_p0, computeSystems, result) -} - -func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { - if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) -} - -func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { - if hr = procHcsCreateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _hcsOpenComputeSystem(_p0, computeSystem, result) -} - -func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { - if hr = procHcsOpenComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { - if hr = procHcsCloseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsStartComputeSystem(computeSystem, _p0, result) -} - -func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsStartComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func 
hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsShutdownComputeSystem(computeSystem, _p0, result) -} - -func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsShutdownComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsTerminateComputeSystem(computeSystem, _p0, result) -} - -func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsPauseComputeSystem(computeSystem, _p0, result) -} - -func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsPauseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsResumeComputeSystem(computeSystem, _p0, result) -} - -func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsResumeComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) - if hr != nil { - return - } - return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) -} - -func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), 
uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsModifyComputeSystem(computeSystem, _p0, result) -} - -func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { - if hr = procHcsModifyComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcsModifyServiceSettings(_p0, result) -} - -func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyServiceSettings.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { - if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { - if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSaveComputeSystem(computeSystem, _p0, result) -} - -func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsSaveComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(processParameters) - if hr != nil { - return - } - return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) -} - 
-func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { - if hr = procHcsCreateProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { - if hr = procHcsOpenProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsCloseProcess(process HcsProcess) (hr error) { - if hr = procHcsCloseProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { - if hr = procHcsTerminateProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsSignalProcess(process, _p0, result) -} - -func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { - if hr = procHcsSignalProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { - if hr = procHcsGetProcessInfo.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { - if hr = procHcsGetProcessProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return 
_hcsModifyProcess(process, _p0, result) -} - -func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) - if hr != nil { - return - } - return _hcsGetServiceProperties(_p0, properties, result) -} - -func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetServiceProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { - if hr = procHcsRegisterProcessCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { - if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go deleted file mode 100644 index 5debe974..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ActivateLayer will find the layer with the given id and mount it's filesystem. -// For a read/write layer, the mounted filesystem will appear as a volume on the -// host, while a read-only layer is generally expected to be a no-op. -// An activated layer must later be deactivated via DeactivateLayer. 
-func ActivateLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::ActivateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = activateLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go deleted file mode 100644 index 3ec708d1..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go +++ /dev/null @@ -1,182 +0,0 @@ -package wclayer - -import ( - "context" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/Microsoft/hcsshim/internal/winapi" - "go.opencensus.io/trace" -) - -type baseLayerWriter struct { - ctx context.Context - s *trace.Span - - root *os.File - f *os.File - bw *winio.BackupFileWriter - err error - hasUtilityVM bool - dirInfo []dirInfo -} - -type dirInfo struct { - path string - fileInfo winio.FileBasicInfo -} - -// reapplyDirectoryTimes reapplies directory modification, creation, etc. times -// after processing of the directory tree has completed. The times are expected -// to be ordered such that parent directories come before child directories. -func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { - for i := range dis { - di := &dis[len(dis)-i-1] // reverse order: process child directories first - f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) - if err != nil { - return err - } - - err = winio.SetFileBasicInfo(f, &di.fileInfo) - f.Close() - if err != nil { - return err - } - - } - return nil -} - -func (w *baseLayerWriter) closeCurrentFile() error { - if w.f != nil { - err := w.bw.Close() - err2 := w.f.Close() - w.f = nil - w.bw = nil - if err != nil { - return err - } - if err2 != nil { - return err2 - } - } - return nil -} - -func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { - defer func() { - if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - if filepath.ToSlash(name) == `UtilityVM/Files` { - w.hasUtilityVM = true - } - - var f *os.File - defer func() { - if f != nil { - f.Close() - } - }() - - extraFlags := uint32(0) - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - extraFlags |= winapi.FILE_DIRECTORY_FILE - w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) - } - - mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) - if err != nil { - return hcserror.New(err, "Failed to safefile.OpenRelative", name) - } - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return hcserror.New(err, "Failed to SetFileBasicInfo", name) - } - - w.f = f - w.bw = winio.NewBackupFileWriter(f, true) - f = nil - return nil -} - -func (w *baseLayerWriter) AddLink(name string, target string) (err error) { - defer func() { - 
if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - return safefile.LinkRelative(target, w.root, name, w.root) -} - -func (w *baseLayerWriter) Remove(name string) error { - return errors.New("base layer cannot have tombstones") -} - -func (w *baseLayerWriter) Write(b []byte) (int, error) { - n, err := w.bw.Write(b) - if err != nil { - w.err = err - } - return n, err -} - -func (w *baseLayerWriter) Close() (err error) { - defer w.s.End() - defer func() { oc.SetSpanStatus(w.s, err) }() - defer func() { - w.root.Close() - w.root = nil - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - if w.err == nil { - // Restore the file times of all the directories, since they may have - // been modified by creating child directories. - err = reapplyDirectoryTimes(w.root, w.dirInfo) - if err != nil { - return err - } - - err = ProcessBaseLayer(w.ctx, w.root.Name()) - if err != nil { - return err - } - - if w.hasUtilityVM { - err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root) - if err != nil { - return err - } - err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) - if err != nil { - return err - } - } - } - return w.err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go deleted file mode 100644 index 480aee87..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// CreateLayer creates a new, empty, read-only layer on the filesystem based on -// the parent layer provided. -func CreateLayer(ctx context.Context, path, parent string) (err error) { - title := "hcsshim::CreateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parent", parent)) - - err = createLayer(&stdDriverInfo, path, parent) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go deleted file mode 100644 index 131aa94f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ /dev/null @@ -1,34 +0,0 @@ -package wclayer - -import ( - "context" - "strings" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// CreateScratchLayer creates and populates new read-write layer for use by a container. 
-// This requires the full list of paths to all parent layers up to the base -func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { - title := "hcsshim::CreateScratchLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = createSandboxLayer(&stdDriverInfo, path, 0, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go deleted file mode 100644 index d5bf2f5b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ /dev/null @@ -1,24 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// DeactivateLayer will dismount a layer that was mounted via ActivateLayer. -func DeactivateLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::DeactivateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = deactivateLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title+"- failed", "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go deleted file mode 100644 index 424467ac..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ /dev/null @@ -1,25 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// DestroyLayer will remove the on-disk files representing the layer with the given -// path, including that layer's containing folder, if any. -func DestroyLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::DestroyLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = destroyLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go deleted file mode 100644 index 035c9041..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ /dev/null @@ -1,140 +0,0 @@ -package wclayer - -import ( - "context" - "os" - "path/filepath" - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" - "go.opencensus.io/trace" -) - -// ExpandScratchSize expands the size of a layer to at least size bytes. 
-func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { - title := "hcsshim::ExpandScratchSize" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.Int64Attribute("size", int64(size))) - - err = expandSandboxSize(&stdDriverInfo, path, size) - if err != nil { - return hcserror.New(err, title, "") - } - - // Manually expand the volume now in order to work around bugs in 19H1 and - // prerelease versions of Vb. Remove once this is fixed in Windows. - if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { - err = expandSandboxVolume(ctx, path) - if err != nil { - return err - } - } - return nil -} - -type virtualStorageType struct { - DeviceID uint32 - VendorID [16]byte -} - -type openVersion2 struct { - GetInfoOnly int32 // bool but 4-byte aligned - ReadOnly int32 // bool but 4-byte aligned - ResiliencyGUID [16]byte // GUID -} - -type openVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 - Version2 openVersion2 -} - -func attachVhd(path string) (syscall.Handle, error) { - var ( - defaultType virtualStorageType - handle syscall.Handle - ) - parameters := openVirtualDiskParameters{Version: 2} - err := openVirtualDisk( - &defaultType, - path, - 0, - 0, - ¶meters, - &handle) - if err != nil { - return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} - } - err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) - if err != nil { - syscall.Close(handle) - return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} - } - return handle, nil -} - -func expandSandboxVolume(ctx context.Context, path string) error { - // Mount the sandbox VHD temporarily. - vhdPath := filepath.Join(path, "sandbox.vhdx") - vhd, err := attachVhd(vhdPath) - if err != nil { - return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} - } - defer syscall.Close(vhd) - - // Open the volume. - volumePath, err := GetLayerMountPath(ctx, path) - if err != nil { - return err - } - if volumePath[len(volumePath)-1] == '\\' { - volumePath = volumePath[:len(volumePath)-1] - } - volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) - if err != nil { - return err - } - defer volume.Close() - - // Get the volume's underlying partition size in NTFS clusters. - var ( - partitionSize int64 - bytes uint32 - ) - const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C - err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) - if err != nil { - return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} - } - const ( - clusterSize = 4096 - sectorSize = 512 - ) - targetClusters := partitionSize / clusterSize - - // Get the volume's current size in NTFS clusters. - var volumeSize int64 - err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) - if err != nil { - return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} - } - volumeClusters := volumeSize / clusterSize - - // Only resize the volume if there is space to grow, otherwise this will - // fail with invalid parameter. NTFS reserves one cluster. 
- if volumeClusters+1 < targetClusters { - targetSectors := targetClusters * (clusterSize / sectorSize) - const _FSCTL_EXTEND_VOLUME = 0x000900F0 - err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) - if err != nil { - return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} - } - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go deleted file mode 100644 index 97b27eb7..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ /dev/null @@ -1,94 +0,0 @@ -package wclayer - -import ( - "context" - "io/ioutil" - "os" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ExportLayer will create a folder at exportFolderPath and fill that folder with -// the transport format version of the layer identified by layerId. This transport -// format includes any metadata required for later importing the layer (using -// ImportLayer), and requires the full list of parent layer paths in order to -// perform the export. -func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { - title := "hcsshim::ExportLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("exportFolderPath", exportFolderPath), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} - -type LayerReader interface { - Next() (string, int64, *winio.FileBasicInfo, error) - Read(b []byte) (int, error) - Close() error -} - -// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. -// The caller must have taken the SeBackupPrivilege privilege -// to call this and any methods on the resulting LayerReader. 
-func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { - ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") - defer func() { - if err != nil { - oc.SetSpanStatus(span, err) - span.End() - } - }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - exportPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - err = ExportLayer(ctx, path, exportPath, parentLayerPaths) - if err != nil { - os.RemoveAll(exportPath) - return nil, err - } - return &legacyLayerReaderWrapper{ - ctx: ctx, - s: span, - legacyLayerReader: newLegacyLayerReader(exportPath), - }, nil -} - -type legacyLayerReaderWrapper struct { - ctx context.Context - s *trace.Span - - *legacyLayerReader -} - -func (r *legacyLayerReaderWrapper) Close() (err error) { - defer r.s.End() - defer func() { oc.SetSpanStatus(r.s, err) }() - - err = r.legacyLayerReader.Close() - os.RemoveAll(r.root) - return err -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go deleted file mode 100644 index 8d213f58..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ /dev/null @@ -1,50 +0,0 @@ -package wclayer - -import ( - "context" - "syscall" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GetLayerMountPath will look for a mounted layer with the given path and return -// the path at which that layer can be accessed. This path may be a volume path -// if the layer is a mounted read-write layer, otherwise it is expected to be the -// folder path at which the layer is stored. -func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { - title := "hcsshim::GetLayerMountPath" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - var mountPathLength uintptr = 0 - - // Call the procedure itself. - log.G(ctx).Debug("Calling proc (1)") - err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) - if err != nil { - return "", hcserror.New(err, title, "(first call)") - } - - // Allocate a mount path of the returned length. 
- if mountPathLength == 0 { - return "", nil - } - mountPathp := make([]uint16, mountPathLength) - mountPathp[0] = 0 - - // Call the procedure again - log.G(ctx).Debug("Calling proc (2)") - err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) - if err != nil { - return "", hcserror.New(err, title, "(second call)") - } - - mountPath := syscall.UTF16ToString(mountPathp[0:]) - span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) - return mountPath, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go deleted file mode 100644 index ae1fff84..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ /dev/null @@ -1,29 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/interop" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GetSharedBaseImages will enumerate the images stored in the common central -// image store and return descriptive info about those images for the purpose -// of registering them with the graphdriver, graph, and tagstore. -func GetSharedBaseImages(ctx context.Context) (_ string, err error) { - title := "hcsshim::GetSharedBaseImages" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - - var buffer *uint16 - err = getBaseImages(&buffer) - if err != nil { - return "", hcserror.New(err, title, "") - } - imageData := interop.ConvertAndFreeCoTaskMemString(buffer) - span.AddAttributes(trace.StringAttribute("imageData", imageData)) - return imageData, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go deleted file mode 100644 index 4b282fef..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ /dev/null @@ -1,26 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// GrantVmAccess adds access to a file for a given VM -func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { - title := "hcsshim::GrantVmAccess" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("vm-id", vmid), - trace.StringAttribute("path", filepath)) - - err = grantVmAccess(vmid, filepath) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go deleted file mode 100644 index 687550f0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ /dev/null @@ -1,166 +0,0 @@ -package wclayer - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/internal/safefile" - "go.opencensus.io/trace" -) - -// ImportLayer will take the contents of the folder at importFolderPath and import -// that into a layer with 
the id layerId. Note that in order to correctly populate -// the layer and interperet the transport format, all parent layers must already -// be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { - title := "hcsshim::ImportLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("importFolderPath", importFolderPath), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - err = importLayer(&stdDriverInfo, path, importFolderPath, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} - -// LayerWriter is an interface that supports writing a new container image layer. -type LayerWriter interface { - // Add adds a file to the layer with given metadata. - Add(name string, fileInfo *winio.FileBasicInfo) error - // AddLink adds a hard link to the layer. The target must already have been added. - AddLink(name string, target string) error - // Remove removes a file that was present in a parent layer from the layer. - Remove(name string) error - // Write writes data to the current file. The data must be in the format of a Win32 - // backup stream. - Write(b []byte) (int, error) - // Close finishes the layer writing process and releases any resources. - Close() error -} - -type legacyLayerWriterWrapper struct { - ctx context.Context - s *trace.Span - - *legacyLayerWriter - path string - parentLayerPaths []string -} - -func (r *legacyLayerWriterWrapper) Close() (err error) { - defer r.s.End() - defer func() { oc.SetSpanStatus(r.s, err) }() - defer os.RemoveAll(r.root.Name()) - defer r.legacyLayerWriter.CloseRoots() - - err = r.legacyLayerWriter.Close() - if err != nil { - return err - } - - if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { - return err - } - for _, name := range r.Tombstones { - if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { - return err - } - } - // Add any hard links that were collected. - for _, lnk := range r.PendingLinks { - if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { - return err - } - if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { - return err - } - } - - // The reapplyDirectoryTimes must be called AFTER we are done with Tombstone - // deletion and hard link creation. This is because Tombstone deletion and hard link - // creation updates the directory last write timestamps so that will change the - // timestamps added by the `Add` call. Some container applications depend on the - // correctness of these timestamps and so we should change the timestamps back to - // the original value (i.e the value provided in the Add call) after this - // processing is done. - err = reapplyDirectoryTimes(r.destRoot, r.changedDi) - if err != nil { - return err - } - - // Prepare the utility VM for use if one is present in the layer. 
- if r.HasUtilityVM { - err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) - if err != nil { - return err - } - err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) - if err != nil { - return err - } - } - return nil -} - -// NewLayerWriter returns a new layer writer for creating a layer on disk. -// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges -// to call this and any methods on the resulting LayerWriter. -func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { - ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") - defer func() { - if err != nil { - oc.SetSpanStatus(span, err) - span.End() - } - }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - if len(parentLayerPaths) == 0 { - // This is a base layer. It gets imported differently. - f, err := safefile.OpenRoot(path) - if err != nil { - return nil, err - } - return &baseLayerWriter{ - ctx: ctx, - s: span, - root: f, - }, nil - } - - importPath, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) - if err != nil { - return nil, err - } - return &legacyLayerWriterWrapper{ - ctx: ctx, - s: span, - legacyLayerWriter: w, - path: importPath, - parentLayerPaths: parentLayerPaths, - }, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go deleted file mode 100644 index 01e67233..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ /dev/null @@ -1,28 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// LayerExists will return true if a layer with the given id exists and is known -// to the system. -func LayerExists(ctx context.Context, path string) (_ bool, err error) { - title := "hcsshim::LayerExists" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - // Call the procedure itself. - var exists uint32 - err = layerExists(&stdDriverInfo, path, &exists) - if err != nil { - return false, hcserror.New(err, title, "") - } - span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) - return exists != 0, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go deleted file mode 100644 index 0ce34a30..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go +++ /dev/null @@ -1,22 +0,0 @@ -package wclayer - -import ( - "context" - "path/filepath" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// LayerID returns the layer ID of a layer on disk. 
-func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { - title := "hcsshim::LayerID" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - _, file := filepath.Split(path) - return NameToGuid(ctx, file) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go deleted file mode 100644 index 1ec893c6..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ /dev/null @@ -1,97 +0,0 @@ -package wclayer - -// This file contains utility functions to support storage (graph) related -// functionality. - -import ( - "context" - "syscall" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/sirupsen/logrus" -) - -/* To pass into syscall, we need a struct matching the following: -enum GraphDriverType -{ - DiffDriver, - FilterDriver -}; - -struct DriverInfo { - GraphDriverType Flavour; - LPCWSTR HomeDir; -}; -*/ - -type driverInfo struct { - Flavour int - HomeDirp *uint16 -} - -var ( - utf16EmptyString uint16 - stdDriverInfo = driverInfo{1, &utf16EmptyString} -) - -/* To pass into syscall, we need a struct matching the following: -typedef struct _WC_LAYER_DESCRIPTOR { - - // - // The ID of the layer - // - - GUID LayerId; - - // - // Additional flags - // - - union { - struct { - ULONG Reserved : 31; - ULONG Dirty : 1; // Created from sandbox as a result of snapshot - }; - ULONG Value; - } Flags; - - // - // Path to the layer root directory, null-terminated - // - - PCWSTR Path; - -} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; -*/ -type WC_LAYER_DESCRIPTOR struct { - LayerId guid.GUID - Flags uint32 - Pathp *uint16 -} - -func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { - // Array of descriptors that gets constructed. 
- var layers []WC_LAYER_DESCRIPTOR - - for i := 0; i < len(parentLayerPaths); i++ { - g, err := LayerID(ctx, parentLayerPaths[i]) - if err != nil { - logrus.WithError(err).Debug("Failed to convert name to guid") - return nil, err - } - - p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) - if err != nil { - logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") - return nil, err - } - - layers = append(layers, WC_LAYER_DESCRIPTOR{ - LayerId: g, - Flags: 0, - Pathp: p, - }) - } - - return layers, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go deleted file mode 100644 index b7f3064f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ /dev/null @@ -1,811 +0,0 @@ -package wclayer - -import ( - "bufio" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/hcsshim/internal/longpath" - "github.com/Microsoft/hcsshim/internal/safefile" - "github.com/Microsoft/hcsshim/internal/winapi" -) - -var errorIterationCanceled = errors.New("") - -var mutatedUtilityVMFiles = map[string]bool{ - `EFI\Microsoft\Boot\BCD`: true, - `EFI\Microsoft\Boot\BCD.LOG`: true, - `EFI\Microsoft\Boot\BCD.LOG1`: true, - `EFI\Microsoft\Boot\BCD.LOG2`: true, -} - -const ( - filesPath = `Files` - hivesPath = `Hives` - utilityVMPath = `UtilityVM` - utilityVMFilesPath = `UtilityVM\Files` -) - -func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { - return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) -} - -func hasPathPrefix(p, prefix string) bool { - return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' -} - -type fileEntry struct { - path string - fi os.FileInfo - err error -} - -type legacyLayerReader struct { - root string - result chan *fileEntry - proceed chan bool - currentFile *os.File - backupReader *winio.BackupFileReader -} - -// newLegacyLayerReader returns a new LayerReader that can read the Windows -// container layer transport format from disk. -func newLegacyLayerReader(root string) *legacyLayerReader { - r := &legacyLayerReader{ - root: root, - result: make(chan *fileEntry), - proceed: make(chan bool), - } - go r.walk() - return r -} - -func readTombstones(path string) (map[string]([]string), error) { - tf, err := os.Open(filepath.Join(path, "tombstones.txt")) - if err != nil { - return nil, err - } - defer tf.Close() - s := bufio.NewScanner(tf) - if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("invalid tombstones file") - } - - ts := make(map[string]([]string)) - for s.Scan() { - t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` - dir := filepath.Dir(t) - ts[dir] = append(ts[dir], t) - } - if err = s.Err(); err != nil { - return nil, err - } - - return ts, nil -} - -func (r *legacyLayerReader) walkUntilCancelled() error { - root, err := longpath.LongAbs(r.root) - if err != nil { - return err - } - - r.root = root - ts, err := readTombstones(r.root) - if err != nil { - return err - } - - err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. 
- // Handle failure from what may be a golang bug in the conversion of - // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat - // which is called by filepath.Walk will fail when a filename contains - // unicode characters. Skip the recycle bin regardless which is goodness. - if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { - return filepath.SkipDir - } - - if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { - return nil - } - - r.result <- &fileEntry{path, info, nil} - if !<-r.proceed { - return errorIterationCanceled - } - - // List all the tombstones. - if info.IsDir() { - relPath, err := filepath.Rel(r.root, path) - if err != nil { - return err - } - if dts, ok := ts[relPath]; ok { - for _, t := range dts { - r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} - if !<-r.proceed { - return errorIterationCanceled - } - } - } - } - return nil - }) - if err == errorIterationCanceled { - return nil - } - if err == nil { - return io.EOF - } - return err -} - -func (r *legacyLayerReader) walk() { - defer close(r.result) - if !<-r.proceed { - return - } - - err := r.walkUntilCancelled() - if err != nil { - for { - r.result <- &fileEntry{err: err} - if !<-r.proceed { - return - } - } - } -} - -func (r *legacyLayerReader) reset() { - if r.backupReader != nil { - r.backupReader.Close() - r.backupReader = nil - } - if r.currentFile != nil { - r.currentFile.Close() - r.currentFile = nil - } -} - -func findBackupStreamSize(r io.Reader) (int64, error) { - br := winio.NewBackupStreamReader(r) - for { - hdr, err := br.Next() - if err != nil { - if err == io.EOF { - err = nil - } - return 0, err - } - if hdr.Id == winio.BackupData { - return hdr.Size, nil - } - } -} - -func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { - r.reset() - r.proceed <- true - fe := <-r.result - if fe == nil { - err = errors.New("LegacyLayerReader closed") - return - } - if fe.err != nil { - err = fe.err - return - } - - path, err = filepath.Rel(r.root, fe.path) - if err != nil { - return - } - - if fe.fi == nil { - // This is a tombstone. Return a nil fileInfo. - return - } - - if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { - fe.path += ".$wcidirs$" - } - - f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) - if err != nil { - return - } - defer func() { - if f != nil { - f.Close() - } - }() - - fileInfo, err = winio.GetFileBasicInfo(f) - if err != nil { - return - } - - if !hasPathPrefix(path, filesPath) { - size = fe.fi.Size() - r.backupReader = winio.NewBackupFileReader(f, false) - if path == hivesPath || path == filesPath { - // The Hives directory has a non-deterministic file time because of the - // nature of the import process. Use the times from System_Delta. - var g *os.File - g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) - if err != nil { - return - } - attr := fileInfo.FileAttributes - fileInfo, err = winio.GetFileBasicInfo(g) - g.Close() - if err != nil { - return - } - fileInfo.FileAttributes = attr - } - - // The creation time and access time get reset for files outside of the Files path. - fileInfo.CreationTime = fileInfo.LastWriteTime - fileInfo.LastAccessTime = fileInfo.LastWriteTime - - } else { - // The file attributes are written before the backup stream. 
- var attr uint32 - err = binary.Read(f, binary.LittleEndian, &attr) - if err != nil { - return - } - fileInfo.FileAttributes = attr - beginning := int64(4) - - // Find the accurate file size. - if !fe.fi.IsDir() { - size, err = findBackupStreamSize(f) - if err != nil { - err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err} - return - } - } - - // Return back to the beginning of the backup stream. - _, err = f.Seek(beginning, 0) - if err != nil { - return - } - } - - r.currentFile = f - f = nil - return -} - -func (r *legacyLayerReader) Read(b []byte) (int, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, io.EOF - } - return r.currentFile.Read(b) - } - return r.backupReader.Read(b) -} - -func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, errors.New("no current file") - } - return r.currentFile.Seek(offset, whence) - } - return 0, errors.New("seek not supported on this stream") -} - -func (r *legacyLayerReader) Close() error { - r.proceed <- false - <-r.result - r.reset() - return nil -} - -type pendingLink struct { - Path, Target string - TargetRoot *os.File -} - -type pendingDir struct { - Path string - Root *os.File -} - -type legacyLayerWriter struct { - root *os.File - destRoot *os.File - parentRoots []*os.File - currentFile *os.File - bufWriter *bufio.Writer - currentFileName string - currentFileRoot *os.File - backupWriter *winio.BackupFileWriter - Tombstones []string - HasUtilityVM bool - changedDi []dirInfo - addedFiles map[string]bool - PendingLinks []pendingLink - pendingDirs []pendingDir - currentIsDir bool -} - -// newLegacyLayerWriter returns a LayerWriter that can write the contaler layer -// transport format to disk. -func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { - w = &legacyLayerWriter{ - addedFiles: make(map[string]bool), - } - defer func() { - if err != nil { - w.CloseRoots() - w = nil - } - }() - w.root, err = safefile.OpenRoot(root) - if err != nil { - return - } - w.destRoot, err = safefile.OpenRoot(destRoot) - if err != nil { - return - } - for _, r := range parentRoots { - f, err := safefile.OpenRoot(r) - if err != nil { - return w, err - } - w.parentRoots = append(w.parentRoots, f) - } - w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536) - return -} - -func (w *legacyLayerWriter) CloseRoots() { - if w.root != nil { - w.root.Close() - w.root = nil - } - if w.destRoot != nil { - w.destRoot.Close() - w.destRoot = nil - } - for i := range w.parentRoots { - _ = w.parentRoots[i].Close() - } - w.parentRoots = nil -} - -func (w *legacyLayerWriter) initUtilityVM() error { - if !w.HasUtilityVM { - err := safefile.MkdirRelative(utilityVMPath, w.destRoot) - if err != nil { - return err - } - // Server 2016 does not support multiple layers for the utility VM, so - // clone the utility VM from the parent layer into this layer. Use hard - // links to avoid unnecessary copying, since most of the files are - // immutable. 
- err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) - if err != nil { - return fmt.Errorf("cloning the parent utility VM image failed: %s", err) - } - w.HasUtilityVM = true - } - return nil -} - -func (w *legacyLayerWriter) reset() error { - err := w.bufWriter.Flush() - if err != nil { - return err - } - w.bufWriter.Reset(ioutil.Discard) - if w.currentIsDir { - r := w.currentFile - br := winio.NewBackupStreamReader(r) - // Seek to the beginning of the backup stream, skipping the fileattrs - if _, err := r.Seek(4, io.SeekStart); err != nil { - return err - } - - for { - bhdr, err := br.Next() - if err == io.EOF { - // end of backupstream data - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupReparseData: - // The current file is a `.$wcidirs$` metadata file that - // describes a directory reparse point. Delete the placeholder - // directory to prevent future files being added into the - // destination of the reparse point during the ImportLayer call - if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { - return err - } - w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) - default: - // ignore all other stream types, as we only care about directory reparse points - } - } - w.currentIsDir = false - } - if w.backupWriter != nil { - w.backupWriter.Close() - w.backupWriter = nil - } - if w.currentFile != nil { - w.currentFile.Close() - w.currentFile = nil - w.currentFileName = "" - w.currentFileRoot = nil - } - return nil -} - -// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata -func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { - src, err := safefile.OpenRelative( - subPath, - srcRoot, - syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - winapi.FILE_OPEN, - winapi.FILE_OPEN_REPARSE_POINT) - if err != nil { - return nil, err - } - defer src.Close() - srcr := winio.NewBackupFileReader(src, true) - defer srcr.Close() - - fileInfo, err = winio.GetFileBasicInfo(src) - if err != nil { - return nil, err - } - - extraFlags := uint32(0) - if isDir { - extraFlags |= winapi.FILE_DIRECTORY_FILE - } - dest, err := safefile.OpenRelative( - subPath, - destRoot, - syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - winapi.FILE_CREATE, - extraFlags) - if err != nil { - return nil, err - } - defer dest.Close() - - err = winio.SetFileBasicInfo(dest, fileInfo) - if err != nil { - return nil, err - } - - destw := winio.NewBackupFileWriter(dest, true) - defer func() { - cerr := destw.Close() - if err == nil { - err = cerr - } - }() - - _, err = io.Copy(destw, srcr) - if err != nil { - return nil, err - } - - return fileInfo, nil -} - -// cloneTree clones a directory tree using hard links. It skips hard links for -// the file names in the provided map and just copies those files. 
-func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { - var di []dirInfo - err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) - if err != nil { - return err - } - err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) - if err != nil { - return err - } - - fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes - // Directories, reparse points, and files that will be mutated during - // utility VM import must be copied. All other files can be hard linked. - isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 - // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. - // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc - // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly - isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 - - if isDir || isReparsePoint || mutatedFiles[relPath] { - fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) - if err != nil { - return err - } - if isDir { - di = append(di, dirInfo{path: relPath, fileInfo: *fi}) - } - } else { - err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) - if err != nil { - return err - } - } - - return nil - }) - if err != nil { - return err - } - - return reapplyDirectoryTimes(destRoot, di) -} - -func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - if err := w.reset(); err != nil { - return err - } - - if name == utilityVMPath { - return w.initUtilityVM() - } - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) - } - - name = filepath.Clean(name) - if hasPathPrefix(name, utilityVMPath) { - if !w.HasUtilityVM { - return errors.New("missing UtilityVM directory") - } - if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { - return errors.New("invalid UtilityVM layer") - } - createDisposition := uint32(winapi.FILE_OPEN) - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - st, err := safefile.LstatRelative(name, w.destRoot) - if err != nil && !os.IsNotExist(err) { - return err - } - if st != nil { - // Delete the existing file/directory if it is not the same type as this directory. - existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes - if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { - return err - } - st = nil - } - } - if st == nil { - if err = safefile.MkdirRelative(name, w.destRoot); err != nil { - return err - } - } - } else { - // Overwrite any existing hard link. 
- err := safefile.RemoveRelative(name, w.destRoot) - if err != nil && !os.IsNotExist(err) { - return err - } - createDisposition = winapi.FILE_CREATE - } - - f, err := safefile.OpenRelative( - name, - w.destRoot, - syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, - syscall.FILE_SHARE_READ, - createDisposition, - winapi.FILE_OPEN_REPARSE_POINT, - ) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - _ = safefile.RemoveRelative(name, w.destRoot) - } - }() - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return err - } - - w.backupWriter = winio.NewBackupFileWriter(f, true) - w.bufWriter.Reset(w.backupWriter) - w.currentFile = f - w.currentFileName = name - w.currentFileRoot = w.destRoot - w.addedFiles[name] = true - f = nil - return nil - } - - fname := name - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - err := safefile.MkdirRelative(name, w.root) - if err != nil { - return err - } - fname += ".$wcidirs$" - w.currentIsDir = true - } - - f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - _ = safefile.RemoveRelative(fname, w.root) - } - }() - - strippedFi := *fileInfo - strippedFi.FileAttributes = 0 - err = winio.SetFileBasicInfo(f, &strippedFi) - if err != nil { - return err - } - - if hasPathPrefix(name, hivesPath) { - w.backupWriter = winio.NewBackupFileWriter(f, false) - w.bufWriter.Reset(w.backupWriter) - } else { - w.bufWriter.Reset(f) - // The file attributes are written before the stream. - err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes)) - if err != nil { - w.bufWriter.Reset(ioutil.Discard) - return err - } - } - - w.currentFile = f - w.currentFileName = name - w.currentFileRoot = w.root - w.addedFiles[name] = true - f = nil - return nil -} - -func (w *legacyLayerWriter) AddLink(name string, target string) error { - if err := w.reset(); err != nil { - return err - } - - target = filepath.Clean(target) - var roots []*os.File - if hasPathPrefix(target, filesPath) { - // Look for cross-layer hard link targets in the parent layers, since - // nothing is in the destination path yet. - roots = w.parentRoots - } else if hasPathPrefix(target, utilityVMFilesPath) { - // Since the utility VM is fully cloned into the destination path - // already, look for cross-layer hard link targets directly in the - // destination path. - roots = []*os.File{w.destRoot} - } - - if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { - return errors.New("invalid hard link in layer") - } - - // Find to try the target of the link in a previously added file. If that - // fails, search in parent layers. - var selectedRoot *os.File - if _, ok := w.addedFiles[target]; ok { - selectedRoot = w.destRoot - } else { - for _, r := range roots { - if _, err := safefile.LstatRelative(target, r); err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - selectedRoot = r - break - } - } - if selectedRoot == nil { - return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) - } - } - - // The link can't be written until after the ImportLayer call. 
- w.PendingLinks = append(w.PendingLinks, pendingLink{ - Path: name, - Target: target, - TargetRoot: selectedRoot, - }) - w.addedFiles[name] = true - return nil -} - -func (w *legacyLayerWriter) Remove(name string) error { - name = filepath.Clean(name) - if hasPathPrefix(name, filesPath) { - w.Tombstones = append(w.Tombstones, name) - } else if hasPathPrefix(name, utilityVMFilesPath) { - err := w.initUtilityVM() - if err != nil { - return err - } - // Make sure the path exists; os.RemoveAll will not fail if the file is - // already gone, and this needs to be a fatal error for diagnostics - // purposes. - if _, err := safefile.LstatRelative(name, w.destRoot); err != nil { - return err - } - err = safefile.RemoveAllRelative(name, w.destRoot) - if err != nil { - return err - } - } else { - return fmt.Errorf("invalid tombstone %s", name) - } - - return nil -} - -func (w *legacyLayerWriter) Write(b []byte) (int, error) { - if w.backupWriter == nil && w.currentFile == nil { - return 0, errors.New("closed") - } - return w.bufWriter.Write(b) -} - -func (w *legacyLayerWriter) Close() error { - if err := w.reset(); err != nil { - return err - } - if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) { - return err - } - for _, pd := range w.pendingDirs { - err := safefile.MkdirRelative(pd.Path, pd.Root) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go deleted file mode 100644 index 09950297..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ /dev/null @@ -1,29 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// NameToGuid converts the given string into a GUID using the algorithm in the -// Host Compute Service, ensuring GUIDs generated with the same string are common -// across all clients. -func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { - title := "hcsshim::NameToGuid" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("objectName", name)) - - var id guid.GUID - err = nameToGuid(name, &id) - if err != nil { - return guid.GUID{}, hcserror.New(err, title, "") - } - span.AddAttributes(trace.StringAttribute("guid", id.String())) - return id, nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go deleted file mode 100644 index 90129fae..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ /dev/null @@ -1,44 +0,0 @@ -package wclayer - -import ( - "context" - "strings" - "sync" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -var prepareLayerLock sync.Mutex - -// PrepareLayer finds a mounted read-write layer matching path and enables the -// the filesystem filter for use on that layer. This requires the paths to all -// parent layers, and is necessary in order to view or interact with the layer -// as an actual filesystem (reading and writing files, creating directories, etc). 
-// Disabling the filter must be done via UnprepareLayer. -func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { - title := "hcsshim::PrepareLayer" - ctx, span := trace.StartSpan(ctx, title) - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes( - trace.StringAttribute("path", path), - trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) - if err != nil { - return err - } - - // This lock is a temporary workaround for a Windows bug. Only allowing one - // call to prepareLayer at a time vastly reduces the chance of a timeout. - prepareLayerLock.Lock() - defer prepareLayerLock.Unlock() - err = prepareLayer(&stdDriverInfo, path, layers) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go deleted file mode 100644 index 30bcdff5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go +++ /dev/null @@ -1,41 +0,0 @@ -package wclayer - -import ( - "context" - "os" - - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// ProcessBaseLayer post-processes a base layer that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessBaseLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::ProcessBaseLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = processBaseImage(path) - if err != nil { - return &os.PathError{Op: title, Path: path, Err: err} - } - return nil -} - -// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { - title := "hcsshim::ProcessUtilityVMImage" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = processUtilityImage(path) - if err != nil { - return &os.PathError{Op: title, Path: path, Err: err} - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go deleted file mode 100644 index 71b130c5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ /dev/null @@ -1,25 +0,0 @@ -package wclayer - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/hcserror" - "github.com/Microsoft/hcsshim/internal/oc" - "go.opencensus.io/trace" -) - -// UnprepareLayer disables the filesystem filter for the read-write layer with -// the given id. 
-func UnprepareLayer(ctx context.Context, path string) (err error) { - title := "hcsshim::UnprepareLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck - defer span.End() - defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("path", path)) - - err = unprepareLayer(&stdDriverInfo, path) - if err != nil { - return hcserror.New(err, title, "") - } - return nil -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go deleted file mode 100644 index 9b1e06d5..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package wclayer provides bindings to HCS's legacy layer management API and -// provides a higher level interface around these calls for container layer -// management. -package wclayer - -import "github.com/Microsoft/go-winio/pkg/guid" - -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go - -//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? -//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? -//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? -//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? -//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? -//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? -//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? -//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? -//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? -//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? -//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? -//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? -//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid? -//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? -//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? -//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? -//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? - -//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? 
- -//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk -//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk - -//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW - -type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go deleted file mode 100644 index 67f917f0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ /dev/null @@ -1,569 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package wclayer - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procActivateLayer = modvmcompute.NewProc("ActivateLayer") - procCopyLayer = modvmcompute.NewProc("CopyLayer") - procCreateLayer = modvmcompute.NewProc("CreateLayer") - procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") - procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") - procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") - procDestroyLayer = modvmcompute.NewProc("DestroyLayer") - procExportLayer = modvmcompute.NewProc("ExportLayer") - procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") - procGetBaseImages = modvmcompute.NewProc("GetBaseImages") - procImportLayer = modvmcompute.NewProc("ImportLayer") - procLayerExists = modvmcompute.NewProc("LayerExists") - procNameToGuid = modvmcompute.NewProc("NameToGuid") - procPrepareLayer = modvmcompute.NewProc("PrepareLayer") - procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") - procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") - procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") - procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") - procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") - procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") -) - -func activateLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _activateLayer(info, _p0) -} - -func _activateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procActivateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 
2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(srcId) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(dstId) - if hr != nil { - return - } - return _copyLayer(info, _p0, _p1, descriptors) -} - -func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procCopyLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func createLayer(info *driverInfo, id string, parent string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(parent) - if hr != nil { - return - } - return _createLayer(info, _p0, _p1) -} - -func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { - if hr = procCreateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _createSandboxLayer(info, _p0, parent, descriptors) -} - -func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procCreateSandboxLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _expandSandboxSize(info, _p0, size) -} - -func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { - if hr = procExpandSandboxSize.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func deactivateLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _deactivateLayer(info, _p0) -} - -func _deactivateLayer(info 
*driverInfo, id *uint16) (hr error) { - if hr = procDeactivateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func destroyLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _destroyLayer(info, _p0) -} - -func _destroyLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procDestroyLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _exportLayer(info, _p0, _p1, descriptors) -} - -func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procExportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _getLayerMountPath(info, _p0, length, buffer) -} - -func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { - if hr = procGetLayerMountPath.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func getBaseImages(buffer **uint16) (hr error) { - if hr = procGetBaseImages.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _importLayer(info, _p0, _p1, descriptors) -} - -func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procImportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _layerExists(info, _p0, exists) -} - -func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { - if hr = procLayerExists.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func nameToGuid(name string, guid *_guid) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(name) - if hr != nil { - return - } - return _nameToGuid(_p0, guid) -} - -func _nameToGuid(name *uint16, guid *_guid) (hr error) { - if hr = procNameToGuid.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _prepareLayer(info, _p0, descriptors) -} - -func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procPrepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func unprepareLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _unprepareLayer(info, _p0) -} - -func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procUnprepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func processBaseImage(path string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _processBaseImage(_p0) -} - -func _processBaseImage(path *uint16) (hr error) { - if hr = procProcessBaseImage.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func processUtilityImage(path string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _processUtilityImage(_p0) -} - -func _processUtilityImage(path *uint16) (hr error) { - if hr = 
procProcessUtilityImage.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func grantVmAccess(vmid string, filepath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(vmid) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(filepath) - if hr != nil { - return - } - return _grantVmAccess(_p0, _p1) -} - -func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { - if hr = procGrantVmAccess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) -} - -func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(directoryName) - if err != nil { - return - } - return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) -} - -func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go deleted file mode 100644 index def95254..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go +++ /dev/null @@ -1,44 +0,0 @@ -package winapi - -import ( - "unsafe" - - 
"golang.org/x/sys/windows" -) - -const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 - -// CreatePseudoConsole creates a windows pseudo console. -func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { - // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. - return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) -} - -// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. -func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { - // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. - return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) -} - -// HRESULT WINAPI CreatePseudoConsole( -// _In_ COORD size, -// _In_ HANDLE hInput, -// _In_ HANDLE hOutput, -// _In_ DWORD dwFlags, -// _Out_ HPCON* phPC -// ); -// -//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole - -// void WINAPI ClosePseudoConsole( -// _In_ HPCON hPC -// ); -// -//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole - -// HRESULT WINAPI ResizePseudoConsole( -// _In_ HPCON hPC , -// _In_ COORD size -// ); -// -//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go deleted file mode 100644 index df28ea24..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go +++ /dev/null @@ -1,13 +0,0 @@ -package winapi - -import "github.com/Microsoft/go-winio/pkg/guid" - -//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA -//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA -//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW -//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW - -type DevPropKey struct { - Fmtid guid.GUID - Pid uint32 -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go deleted file mode 100644 index 4e80ef68..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go +++ /dev/null @@ -1,15 +0,0 @@ -package winapi - -import "syscall" - -//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError - -const ( - STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B - ERROR_NO_MORE_ITEMS = 0x103 - ERROR_MORE_DATA syscall.Errno = 234 -) - -func NTSuccess(status uint32) bool { - return status == 0 -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go deleted file mode 100644 index 7ce52afd..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go +++ /dev/null @@ -1,110 +0,0 @@ -package winapi - -//sys NtCreateFile(handle 
*uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile -//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile - -//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject -//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject - -const ( - FileLinkInformationClass = 11 - FileDispositionInformationExClass = 64 - - FILE_READ_ATTRIBUTES = 0x0080 - FILE_WRITE_ATTRIBUTES = 0x0100 - DELETE = 0x10000 - - FILE_OPEN = 1 - FILE_CREATE = 2 - - FILE_LIST_DIRECTORY = 0x00000001 - FILE_DIRECTORY_FILE = 0x00000001 - FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 - FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 - FILE_OPEN_REPARSE_POINT = 0x00200000 - - FILE_DISPOSITION_DELETE = 0x00000001 - - OBJ_DONT_REPARSE = 0x1000 - - STATUS_MORE_ENTRIES = 0x105 - STATUS_NO_MORE_ENTRIES = 0x8000001a -) - -// Select entries from FILE_INFO_BY_HANDLE_CLASS. -// -// C declaration: -// typedef enum _FILE_INFO_BY_HANDLE_CLASS { -// FileBasicInfo, -// FileStandardInfo, -// FileNameInfo, -// FileRenameInfo, -// FileDispositionInfo, -// FileAllocationInfo, -// FileEndOfFileInfo, -// FileStreamInfo, -// FileCompressionInfo, -// FileAttributeTagInfo, -// FileIdBothDirectoryInfo, -// FileIdBothDirectoryRestartInfo, -// FileIoPriorityHintInfo, -// FileRemoteProtocolInfo, -// FileFullDirectoryInfo, -// FileFullDirectoryRestartInfo, -// FileStorageInfo, -// FileAlignmentInfo, -// FileIdInfo, -// FileIdExtdDirectoryInfo, -// FileIdExtdDirectoryRestartInfo, -// FileDispositionInfoEx, -// FileRenameInfoEx, -// FileCaseSensitiveInfo, -// FileNormalizedNameInfo, -// MaximumFileInfoByHandleClass -// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; -// -// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class -const ( - FileIdInfo = 18 -) - -type FileDispositionInformationEx struct { - Flags uintptr -} - -type IOStatusBlock struct { - Status, Information uintptr -} - -type ObjectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *UnicodeString - Attributes uintptr - SecurityDescriptor uintptr - SecurityQoS uintptr -} - -type ObjectDirectoryInformation struct { - Name UnicodeString - TypeName UnicodeString -} - -type FileLinkInformation struct { - ReplaceIfExists bool - RootDirectory uintptr - FileNameLength uint32 - FileName [1]uint16 -} - -// C declaration: -// typedef struct _FILE_ID_INFO { -// ULONGLONG VolumeSerialNumber; -// FILE_ID_128 FileId; -// } FILE_ID_INFO, *PFILE_ID_INFO; -// -// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info -type FILE_ID_INFO struct { - VolumeSerialNumber uint64 - FileID [16]byte -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go deleted file mode 100644 index 7eb13f8f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go +++ /dev/null @@ -1,218 +0,0 @@ -package winapi - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -// Messages 
that can be received from an assigned io completion port. -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port -const ( - JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 - JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 - JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 - JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 - JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 - JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 - JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 - JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 - JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 - JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 -) - -// Access rights for creating or opening job objects. -// -// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights -const ( - JOB_OBJECT_QUERY = 0x0004 - JOB_OBJECT_ALL_ACCESS = 0x1F001F -) - -// IO limit flags -// -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information -const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 - -const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information -const ( - JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota - JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED - JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP - JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY - JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE -) - -// JobObjectInformationClass values. Used for a call to QueryInformationJobObject -// -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject -const ( - JobObjectBasicAccountingInformation uint32 = 1 - JobObjectBasicProcessIdList uint32 = 3 - JobObjectBasicAndIoAccountingInformation uint32 = 8 - JobObjectLimitViolationInformation uint32 = 13 - JobObjectMemoryUsageInformation uint32 = 28 - JobObjectNotificationLimitInformation2 uint32 = 33 - JobObjectIoAttribution uint32 = 42 -) - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information -type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { - ControlFlags uint32 - Value uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information -type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { - MaxIops int64 - MaxBandwidth int64 - ReservationIops int64 - BaseIOSize uint32 - VolumeName string - ControlFlags uint32 -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list -type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { - NumberOfAssignedProcesses uint32 - NumberOfProcessIdsInList uint32 - ProcessIdList [1]uintptr -} - -// AllPids returns all the process Ids in the job object. 
-func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { - return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList] -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information -type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { - TotalUserTime int64 - TotalKernelTime int64 - ThisPeriodTotalUserTime int64 - ThisPeriodTotalKernelTime int64 - TotalPageFaultCount uint32 - TotalProcesses uint32 - ActiveProcesses uint32 - TotalTerminateProcesses uint32 -} - -//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information -type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { - BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION - IoInfo windows.IO_COUNTERS -} - -// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { -// ULONG64 JobMemory; -// ULONG64 PeakJobMemoryUsed; -// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; -// -type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { - JobMemory uint64 - PeakJobMemoryUsed uint64 -} - -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { -// ULONG_PTR IoCount; -// ULONGLONG TotalNonOverlappedQueueTime; -// ULONGLONG TotalNonOverlappedServiceTime; -// ULONGLONG TotalSize; -// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; -// -type JOBOBJECT_IO_ATTRIBUTION_STATS struct { - IoCount uintptr - TotalNonOverlappedQueueTime uint64 - TotalNonOverlappedServiceTime uint64 - TotalSize uint64 -} - -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { -// ULONG ControlFlags; -// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; -// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; -// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; -// -type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { - ControlFlags uint32 - ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS - WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS -} - -// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port -type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { - CompletionKey windows.Handle - CompletionPort windows.Handle -} - -// BOOL IsProcessInJob( -// HANDLE ProcessHandle, -// HANDLE JobHandle, -// PBOOL Result -// ); -// -//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob - -// BOOL QueryInformationJobObject( -// HANDLE hJob, -// JOBOBJECTINFOCLASS JobObjectInformationClass, -// LPVOID lpJobObjectInformation, -// DWORD cbJobObjectInformationLength, -// LPDWORD lpReturnLength -// ); -// -//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject - -// HANDLE OpenJobObjectW( -// DWORD dwDesiredAccess, -// BOOL bInheritHandle, -// LPCWSTR lpName -// ); -// -//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW - -// DWORD SetIoRateControlInformationJobObject( -// HANDLE hJob, -// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo -// ); -// -//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject - -// DWORD QueryIoRateControlInformationJobObject( -// HANDLE hJob, -// 
PCWSTR VolumeName, -// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, -// ULONG *InfoBlockCount -// ); -//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject - -// NTSTATUS -// NtOpenJobObject ( -// _Out_ PHANDLE JobHandle, -// _In_ ACCESS_MASK DesiredAccess, -// _In_ POBJECT_ATTRIBUTES ObjectAttributes -// ); -//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject - -// NTSTATUS -// NTAPI -// NtCreateJobObject ( -// _Out_ PHANDLE JobHandle, -// _In_ ACCESS_MASK DesiredAccess, -// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes -// ); -//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go deleted file mode 100644 index b6e7cfd4..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go +++ /dev/null @@ -1,30 +0,0 @@ -package winapi - -// BOOL LogonUserA( -// LPCWSTR lpszUsername, -// LPCWSTR lpszDomain, -// LPCWSTR lpszPassword, -// DWORD dwLogonType, -// DWORD dwLogonProvider, -// PHANDLE phToken -// ); -// -//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW - -// Logon types -const ( - LOGON32_LOGON_INTERACTIVE uint32 = 2 - LOGON32_LOGON_NETWORK uint32 = 3 - LOGON32_LOGON_BATCH uint32 = 4 - LOGON32_LOGON_SERVICE uint32 = 5 - LOGON32_LOGON_UNLOCK uint32 = 7 - LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 - LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 -) - -// Logon providers -const ( - LOGON32_PROVIDER_DEFAULT uint32 = 0 - LOGON32_PROVIDER_WINNT40 uint32 = 2 - LOGON32_PROVIDER_WINNT50 uint32 = 3 -) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go deleted file mode 100644 index 53f62948..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go +++ /dev/null @@ -1,4 +0,0 @@ -package winapi - -//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc -//sys LocalFree(ptr uintptr) = kernel32.LocalFree diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go deleted file mode 100644 index f3791002..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go +++ /dev/null @@ -1,3 +0,0 @@ -package winapi - -//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go deleted file mode 100644 index 908920e8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go +++ /dev/null @@ -1,11 +0,0 @@ -package winapi - -// DWORD SearchPathW( -// LPCWSTR lpPath, -// LPCWSTR lpFileName, -// LPCWSTR lpExtension, -// DWORD nBufferLength, -// LPWSTR lpBuffer, -// LPWSTR *lpFilePart -// ); -//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = 
kernel32.SearchPathW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go deleted file mode 100644 index 222529f4..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go +++ /dev/null @@ -1,65 +0,0 @@ -package winapi - -const PROCESS_ALL_ACCESS uint32 = 2097151 - -const ( - PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 - PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D -) - -// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures. -const ProcessVmCounters = 3 - -// __kernel_entry NTSTATUS NtQueryInformationProcess( -// [in] HANDLE ProcessHandle, -// [in] PROCESSINFOCLASS ProcessInformationClass, -// [out] PVOID ProcessInformation, -// [in] ULONG ProcessInformationLength, -// [out, optional] PULONG ReturnLength -// ); -// -//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess - -// typedef struct _VM_COUNTERS_EX -// { -// SIZE_T PeakVirtualSize; -// SIZE_T VirtualSize; -// ULONG PageFaultCount; -// SIZE_T PeakWorkingSetSize; -// SIZE_T WorkingSetSize; -// SIZE_T QuotaPeakPagedPoolUsage; -// SIZE_T QuotaPagedPoolUsage; -// SIZE_T QuotaPeakNonPagedPoolUsage; -// SIZE_T QuotaNonPagedPoolUsage; -// SIZE_T PagefileUsage; -// SIZE_T PeakPagefileUsage; -// SIZE_T PrivateUsage; -// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; -// -type VM_COUNTERS_EX struct { - PeakVirtualSize uintptr - VirtualSize uintptr - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -// typedef struct _VM_COUNTERS_EX2 -// { -// VM_COUNTERS_EX CountersEx; -// SIZE_T PrivateWorkingSetSize; -// SIZE_T SharedCommitUsage; -// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; -// -type VM_COUNTERS_EX2 struct { - CountersEx VM_COUNTERS_EX - PrivateWorkingSetSize uintptr - SharedCommitUsage uintptr -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go deleted file mode 100644 index ce79ac2c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go +++ /dev/null @@ -1,7 +0,0 @@ -package winapi - -// Get count from all processor groups. 
-// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups -const ALL_PROCESSOR_GROUPS = 0xFFFF - -//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go deleted file mode 100644 index 78fe01a4..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go +++ /dev/null @@ -1,53 +0,0 @@ -package winapi - -import "golang.org/x/sys/windows" - -const SystemProcessInformation = 5 - -const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 - -// __kernel_entry NTSTATUS NtQuerySystemInformation( -// SYSTEM_INFORMATION_CLASS SystemInformationClass, -// PVOID SystemInformation, -// ULONG SystemInformationLength, -// PULONG ReturnLength -// ); -// -//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation - -type SYSTEM_PROCESS_INFORMATION struct { - NextEntryOffset uint32 // ULONG - NumberOfThreads uint32 // ULONG - WorkingSetPrivateSize int64 // LARGE_INTEGER - HardFaultCount uint32 // ULONG - NumberOfThreadsHighWatermark uint32 // ULONG - CycleTime uint64 // ULONGLONG - CreateTime int64 // LARGE_INTEGER - UserTime int64 // LARGE_INTEGER - KernelTime int64 // LARGE_INTEGER - ImageName UnicodeString // UNICODE_STRING - BasePriority int32 // KPRIORITY - UniqueProcessID windows.Handle // HANDLE - InheritedFromUniqueProcessID windows.Handle // HANDLE - HandleCount uint32 // ULONG - SessionID uint32 // ULONG - UniqueProcessKey *uint32 // ULONG_PTR - PeakVirtualSize uintptr // SIZE_T - VirtualSize uintptr // SIZE_T - PageFaultCount uint32 // ULONG - PeakWorkingSetSize uintptr // SIZE_T - WorkingSetSize uintptr // SIZE_T - QuotaPeakPagedPoolUsage uintptr // SIZE_T - QuotaPagedPoolUsage uintptr // SIZE_T - QuotaPeakNonPagedPoolUsage uintptr // SIZE_T - QuotaNonPagedPoolUsage uintptr // SIZE_T - PagefileUsage uintptr // SIZE_T - PeakPagefileUsage uintptr // SIZE_T - PrivatePageCount uintptr // SIZE_T - ReadOperationCount int64 // LARGE_INTEGER - WriteOperationCount int64 // LARGE_INTEGER - OtherOperationCount int64 // LARGE_INTEGER - ReadTransferCount int64 // LARGE_INTEGER - WriteTransferCount int64 // LARGE_INTEGER - OtherTransferCount int64 // LARGE_INTEGER -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go deleted file mode 100644 index 4724713e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go +++ /dev/null @@ -1,12 +0,0 @@ -package winapi - -// HANDLE CreateRemoteThread( -// HANDLE hProcess, -// LPSECURITY_ATTRIBUTES lpThreadAttributes, -// SIZE_T dwStackSize, -// LPTHREAD_START_ROUTINE lpStartAddress, -// LPVOID lpParameter, -// DWORD dwCreationFlags, -// LPDWORD lpThreadId -// ); -//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go deleted file mode 100644 index 859b753c..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ /dev/null @@ -1,80 +0,0 @@ -package winapi - -import ( - "errors" - "reflect" - 
"syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice -// for easier interop with Go APIs -func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) - hdr.Data = uintptr(unsafe.Pointer(buffer)) - hdr.Cap = bufferLength - hdr.Len = bufferLength - - return -} - -// UnicodeString corresponds to UNICODE_STRING win32 struct defined here -// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string -type UnicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer *uint16 -} - -// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value -// denotes the maximum number of wide chars a path can have. -const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 - -//String converts a UnicodeString to a golang string -func (uni UnicodeString) String() string { - // UnicodeString is not guaranteed to be null terminated, therefore - // use the UnicodeString's Length field - return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) -} - -// NewUnicodeString allocates a new UnicodeString and copies `s` into -// the buffer of the new UnicodeString. -func NewUnicodeString(s string) (*UnicodeString, error) { - buf, err := windows.UTF16FromString(s) - if err != nil { - return nil, err - } - - if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { - return nil, syscall.ENAMETOOLONG - } - - uni := &UnicodeString{ - // The length is in bytes and should not include the trailing null character. - Length: uint16((len(buf) - 1) * 2), - MaximumLength: uint16((len(buf) - 1) * 2), - Buffer: &buf[0], - } - return uni, nil -} - -// ConvertStringSetToSlice is a helper function used to convert the contents of -// `buf` into a string slice. `buf` contains a set of null terminated strings -// with an additional null at the end to indicate the end of the set. -func ConvertStringSetToSlice(buf []byte) ([]string, error) { - var results []string - prev := 0 - for i := range buf { - if buf[i] == 0 { - if prev == i { - // found two null characters in a row, return result - return results, nil - } - results = append(results, string(buf[prev:i])) - prev = i + 1 - } - } - return nil, errors.New("string set malformed: missing null terminator at end of buffer") -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go deleted file mode 100644 index d2cc9d9f..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package winapi contains various low-level bindings to Windows APIs. It can -// be thought of as an extension to golang.org/x/sys/windows. 
-package winapi - -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go deleted file mode 100644 index 1f16cf0b..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ /dev/null @@ -1,354 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package winapi - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") - - procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") - procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") - procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") - procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") - procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") - procSearchPathW = modkernel32.NewProc("SearchPathW") - procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") - procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") - procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") - procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") - procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") - procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") - procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") - procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") - procLogonUserW = modadvapi32.NewProc("LogonUserW") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") - procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") - procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") - procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") - procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") - procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") - procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") - procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") - procRtlNtStatusToDosError = 
modntdll.NewProc("RtlNtStatusToDosError") -) - -func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func ClosePseudoConsole(hpc windows.Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) - return -} - -func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { - r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) - if r0 != 0 { - win32Err = syscall.Errno(r0) - } - return -} - -func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { - r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func 
OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) - return -} - -func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) - return -} - -func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { - r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LocalAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) - ptr = uintptr(r0) - return -} - -func LocalFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) - return -} - -func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) - status = uint32(r0) - return -} - -func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - amount = uint32(r0) - return -} - -func CMGetDeviceIDListSize(pulLen *uint32, pszFilter 
*byte, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(pDeviceID) - if hr != nil { - return - } - return _CMLocateDevNode(pdnDevInst, _p0, uFlags) -} - -func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) - status = uint32(r0) - return -} - -func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) - status = uint32(r0) - return -} - -func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) - status = uint32(r0) - return -} - -func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { - var _p0 uint32 - if singleEntry { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if restartScan { - _p1 = 1 - } else { - _p1 = 0 - } - r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 
7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func RtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go deleted file mode 100644 index 89161637..00000000 --- a/vendor/github.com/Microsoft/hcsshim/layer.go +++ /dev/null @@ -1,107 +0,0 @@ -package hcsshim - -import ( - "context" - "crypto/sha1" - "path/filepath" - - "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -func layerPath(info *DriverInfo, id string) string { - return filepath.Join(info.HomeDir, id) -} - -func ActivateLayer(info DriverInfo, id string) error { - return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) -} -func CreateLayer(info DriverInfo, id, parent string) error { - return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) -} - -// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. -func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func DeactivateLayer(info DriverInfo, id string) error { - return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) -} -func DestroyLayer(info DriverInfo, id string) error { - return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) -} - -// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
-func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) -} -func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { - return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) -} -func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { - return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) -} -func GetLayerMountPath(info DriverInfo, id string) (string, error) { - return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) -} -func GetSharedBaseImages() (imageData string, err error) { - return wclayer.GetSharedBaseImages(context.Background()) -} -func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { - return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) -} -func LayerExists(info DriverInfo, id string) (bool, error) { - return wclayer.LayerExists(context.Background(), layerPath(&info, id)) -} -func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { - return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) -} -func ProcessBaseLayer(path string) error { - return wclayer.ProcessBaseLayer(context.Background(), path) -} -func ProcessUtilityVMImage(path string) error { - return wclayer.ProcessUtilityVMImage(context.Background(), path) -} -func UnprepareLayer(info DriverInfo, layerId string) error { - return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) -} - -type DriverInfo struct { - Flavour int - HomeDir string -} - -type GUID [16]byte - -func NameToGuid(name string) (id GUID, err error) { - g, err := wclayer.NameToGuid(context.Background(), name) - return g.ToWindowsArray(), err -} - -func NewGUID(source string) *GUID { - h := sha1.Sum([]byte(source)) - var g GUID - copy(g[0:], h[0:16]) - return &g -} - -func (g *GUID) ToString() string { - return guid.FromWindowsArray(*g).String() -} - -type LayerReader = wclayer.LayerReader - -func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { - return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) -} - -type LayerWriter = wclayer.LayerWriter - -func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { - return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) -} - -type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go deleted file mode 100644 index 3ab3bcd8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go +++ /dev/null @@ -1,50 +0,0 @@ -package osversion - -import ( - "fmt" - "sync" - - "golang.org/x/sys/windows" -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -var ( - osv OSVersion - once sync.Once -) - -// Get gets the operating system version on Windows. 
-// The calling application must be manifested to get the correct version information. -func Get() OSVersion { - once.Do(func() { - var err error - osv = OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - }) - return osv -} - -// Build gets the build-number on Windows -// The calling application must be manifested to get the correct version information. -func Build() uint16 { - return Get().Build -} - -func (osv OSVersion) ToString() string { - return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) -} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go deleted file mode 100644 index f8d411ad..00000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -package osversion - -// List of stable ABI compliant ltsc releases -// Note: List must be sorted in ascending order -var compatLTSCReleases = []uint16{ - V21H2Server, -} - -// CheckHostAndContainerCompat checks if given host and container -// OS versions are compatible. -// It includes support for stable ABI compliant versions as well. -// Every release after WS 2022 will support the previous ltsc -// container image. Stable ABI is in preview mode for windows 11 client. -// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility -func CheckHostAndContainerCompat(host, ctr OSVersion) bool { - // check major minor versions of host and guest - if host.MajorVersion != ctr.MajorVersion || - host.MinorVersion != ctr.MinorVersion { - return false - } - - // If host is < WS 2022, exact version match is required - if host.Build < V21H2Server { - return host.Build == ctr.Build - } - - var supportedLtscRelease uint16 - for i := len(compatLTSCReleases) - 1; i >= 0; i-- { - if host.Build >= compatLTSCReleases[i] { - supportedLtscRelease = compatLTSCReleases[i] - break - } - } - return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build -} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go deleted file mode 100644 index b609fa7e..00000000 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ /dev/null @@ -1,58 +0,0 @@ -package osversion - -const ( - // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server - // 2016 (ltsc2016) and Windows 10 (Anniversary Update). - RS1 = 14393 - - // RS2 (version 1703, codename "Redstone 2") was a client-only update, and - // corresponds to Windows 10 (Creators Update). - RS2 = 15063 - - // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server - // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). - RS3 = 16299 - - // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server - // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). - RS4 = 17134 - - // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server - // 2019 (ltsc2019), and Windows 10 (October 2018 Update). - RS5 = 17763 - - // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual - // channel). 
- V19H1 = 18362 - - // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual - // channel). - V19H2 = 18363 - - // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual - // channel). - V20H1 = 19041 - - // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). - V20H2 = 19042 - - // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). - V21H1 = 19043 - - // V21H2Win10 corresponds to Windows 10 (November 2021 Update). - V21H2Win10 = 19044 - - // V21H2Server corresponds to Windows Server 2022 (ltsc2022). - V21H2Server = 20348 - // LTSC2022 (Windows Server 2022) is an alias for [V21H2Server] - LTSC2022 = V21H2Server - - // V21H2Win11 corresponds to Windows 11 (original release). - V21H2Win11 = 22000 - - // V22H2Win10 corresponds to Windows 10 (2022 Update). - V22H2Win10 = 19045 - - // V22H2Win11 corresponds to Windows 11 (2022 Update). - V22H2Win11 = 22621 -) diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go deleted file mode 100644 index 3362c683..00000000 --- a/vendor/github.com/Microsoft/hcsshim/process.go +++ /dev/null @@ -1,98 +0,0 @@ -package hcsshim - -import ( - "context" - "io" - "sync" - "time" - - "github.com/Microsoft/hcsshim/internal/hcs" -) - -// ContainerError is an error encountered in HCS -type process struct { - p *hcs.Process - waitOnce sync.Once - waitCh chan struct{} - waitErr error -} - -// Pid returns the process ID of the process within the container. -func (process *process) Pid() int { - return process.p.Pid() -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *process) Kill() error { - found, err := process.p.Kill(context.Background()) - if err != nil { - return convertProcessError(err, process) - } - if !found { - return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} - } - return nil -} - -// Wait waits for the process to exit. -func (process *process) Wait() error { - return convertProcessError(process.p.Wait(), process) -} - -// WaitTimeout waits for the process to exit or the duration to elapse. It returns -// false if timeout occurs. -func (process *process) WaitTimeout(timeout time.Duration) error { - process.waitOnce.Do(func() { - process.waitCh = make(chan struct{}) - go func() { - process.waitErr = process.Wait() - close(process.waitCh) - }() - }) - t := time.NewTimer(timeout) - defer t.Stop() - select { - case <-t.C: - return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} - case <-process.waitCh: - return process.waitErr - } -} - -// ExitCode returns the exit code of the process. The process must have -// already terminated. -func (process *process) ExitCode() (int, error) { - code, err := process.p.ExitCode() - if err != nil { - err = convertProcessError(err, process) - } - return code, err -} - -// ResizeConsole resizes the console of the process. -func (process *process) ResizeConsole(width, height uint16) error { - return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes; it should be possible to -// call this multiple times to get multiple interfaces. 
-func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { - stdin, stdout, stderr, err := process.p.StdioLegacy() - if err != nil { - err = convertProcessError(err, process) - } - return stdin, stdout, stderr, err -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. -func (process *process) CloseStdin() error { - return convertProcessError(process.p.CloseStdin(context.Background()), process) -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. -func (process *process) Close() error { - return convertProcessError(process.p.Close(), process) -} diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go deleted file mode 100644 index 8bed8485..00000000 --- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT - -package hcsshim - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - - procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") -) - -func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { - r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/vendor/github.com/containerd/cgroups/stats/v1/doc.go deleted file mode 100644 index 23f3cdd4..00000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1 diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go deleted file mode 100644 index 6d2d4177..00000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go +++ /dev/null @@ -1,6125 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: github.com/containerd/cgroups/stats/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Metrics struct { - Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` - Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` - CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` - Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` - Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` - Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` - CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` - MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metrics) Reset() { *m = Metrics{} } -func (*Metrics) ProtoMessage() {} -func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{0} -} -func (m *Metrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metrics.Merge(m, src) -} -func (m *Metrics) XXX_Size() int { - return m.Size() -} -func (m *Metrics) XXX_DiscardUnknown() { - xxx_messageInfo_Metrics.DiscardUnknown(m) -} - -var xxx_messageInfo_Metrics proto.InternalMessageInfo - -type HugetlbStat struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } -func (*HugetlbStat) ProtoMessage() {} -func (*HugetlbStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{1} -} -func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) - } 
else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HugetlbStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_HugetlbStat.Merge(m, src) -} -func (m *HugetlbStat) XXX_Size() int { - return m.Size() -} -func (m *HugetlbStat) XXX_DiscardUnknown() { - xxx_messageInfo_HugetlbStat.DiscardUnknown(m) -} - -var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo - -type PidsStat struct { - Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidsStat) Reset() { *m = PidsStat{} } -func (*PidsStat) ProtoMessage() {} -func (*PidsStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{2} -} -func (m *PidsStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsStat.Merge(m, src) -} -func (m *PidsStat) XXX_Size() int { - return m.Size() -} -func (m *PidsStat) XXX_DiscardUnknown() { - xxx_messageInfo_PidsStat.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsStat proto.InternalMessageInfo - -type CPUStat struct { - Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` - Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUStat) Reset() { *m = CPUStat{} } -func (*CPUStat) ProtoMessage() {} -func (*CPUStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{3} -} -func (m *CPUStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUStat.Merge(m, src) -} -func (m *CPUStat) XXX_Size() int { - return m.Size() -} -func (m *CPUStat) XXX_DiscardUnknown() { - xxx_messageInfo_CPUStat.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUStat proto.InternalMessageInfo - -type CPUUsage struct { - // values in nanoseconds - Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` - User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` - PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUUsage) Reset() { *m = CPUUsage{} } -func (*CPUUsage) ProtoMessage() {} -func (*CPUUsage) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{4} -} -func (m *CPUUsage) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUUsage) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUUsage.Merge(m, src) -} -func (m *CPUUsage) XXX_Size() int { - return m.Size() -} -func (m *CPUUsage) XXX_DiscardUnknown() { - xxx_messageInfo_CPUUsage.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUUsage proto.InternalMessageInfo - -type Throttle struct { - Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Throttle) Reset() { *m = Throttle{} } -func (*Throttle) ProtoMessage() {} -func (*Throttle) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{5} -} -func (m *Throttle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Throttle) XXX_Merge(src proto.Message) { - xxx_messageInfo_Throttle.Merge(m, src) -} -func (m *Throttle) XXX_Size() int { - return m.Size() -} -func (m *Throttle) XXX_DiscardUnknown() { - xxx_messageInfo_Throttle.DiscardUnknown(m) -} - -var xxx_messageInfo_Throttle proto.InternalMessageInfo - -type MemoryStat struct { - Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` - RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` - RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` - MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` - Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` - Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` - PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` - PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` - PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` - PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` - InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` - ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` - InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` - ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` - Unevictable uint64 
`protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` - HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` - HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` - TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` - TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` - TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` - TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` - TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` - TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` - TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` - TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` - TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` - TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` - TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` - TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` - TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` - TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` - TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` - Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` - Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` - Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` - KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryStat) Reset() { *m = MemoryStat{} } -func (*MemoryStat) ProtoMessage() {} -func (*MemoryStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{6} -} -func (m *MemoryStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryStat.Merge(m, src) -} -func 
(m *MemoryStat) XXX_Size() int { - return m.Size() -} -func (m *MemoryStat) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryStat.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryStat proto.InternalMessageInfo - -type MemoryEntry struct { - Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` - Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } -func (*MemoryEntry) ProtoMessage() {} -func (*MemoryEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{7} -} -func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryEntry.Merge(m, src) -} -func (m *MemoryEntry) XXX_Size() int { - return m.Size() -} -func (m *MemoryEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo - -type MemoryOomControl struct { - OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` - UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` - OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } -func (*MemoryOomControl) ProtoMessage() {} -func (*MemoryOomControl) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{8} -} -func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryOomControl) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryOomControl.Merge(m, src) -} -func (m *MemoryOomControl) XXX_Size() int { - return m.Size() -} -func (m *MemoryOomControl) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo - -type BlkIOStat struct { - IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" 
json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` - SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } -func (*BlkIOStat) ProtoMessage() {} -func (*BlkIOStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{9} -} -func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlkIOStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlkIOStat.Merge(m, src) -} -func (m *BlkIOStat) XXX_Size() int { - return m.Size() -} -func (m *BlkIOStat) XXX_DiscardUnknown() { - xxx_messageInfo_BlkIOStat.DiscardUnknown(m) -} - -var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo - -type BlkIOEntry struct { - Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` - Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` - Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` - Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } -func (*BlkIOEntry) ProtoMessage() {} -func (*BlkIOEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{10} -} -func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlkIOEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlkIOEntry.Merge(m, src) -} -func (m *BlkIOEntry) XXX_Size() int { - return m.Size() -} -func (m *BlkIOEntry) XXX_DiscardUnknown() { - xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo - -type RdmaStat struct { - Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` - Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *RdmaStat) Reset() { *m = RdmaStat{} } -func (*RdmaStat) ProtoMessage() {} -func (*RdmaStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{11} -} -func (m *RdmaStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaStat.Merge(m, src) -} -func (m *RdmaStat) XXX_Size() int { - return m.Size() -} -func (m *RdmaStat) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaStat.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaStat proto.InternalMessageInfo - -type RdmaEntry struct { - Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` - HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` - HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } -func (*RdmaEntry) ProtoMessage() {} -func (*RdmaEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{12} -} -func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaEntry.Merge(m, src) -} -func (m *RdmaEntry) XXX_Size() int { - return m.Size() -} -func (m *RdmaEntry) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo - -type NetworkStat struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` - RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` - TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NetworkStat) Reset() { *m = NetworkStat{} } -func (*NetworkStat) ProtoMessage() {} -func (*NetworkStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{13} -} -func (m *NetworkStat) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NetworkStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkStat.Merge(m, src) -} -func (m *NetworkStat) XXX_Size() int { - return m.Size() -} -func (m *NetworkStat) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkStat.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkStat proto.InternalMessageInfo - -// CgroupStats exports per-cgroup statistics. -type CgroupStats struct { - // number of tasks sleeping - NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` - // number of tasks running - NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` - // number of tasks in stopped state - NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` - // number of tasks in uninterruptible state - NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` - // number of tasks waiting on IO - NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CgroupStats) Reset() { *m = CgroupStats{} } -func (*CgroupStats) ProtoMessage() {} -func (*CgroupStats) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{14} -} -func (m *CgroupStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CgroupStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_CgroupStats.Merge(m, src) -} -func (m *CgroupStats) XXX_Size() int { - return m.Size() -} -func (m *CgroupStats) XXX_DiscardUnknown() { - xxx_messageInfo_CgroupStats.DiscardUnknown(m) -} - -var xxx_messageInfo_CgroupStats proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") - proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") - proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") - proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") - proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") - proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") - proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") - proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") - proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") - proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") - proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") - proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") - proto.RegisterType((*RdmaEntry)(nil), 
"io.containerd.cgroups.v1.RdmaEntry") - proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") - proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") -} - -func init() { - proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) -} - -var fileDescriptor_a17b2d87c332bfaa = []byte{ - // 1749 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, - 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, - 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, - 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, - 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, - 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, - 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, - 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, - 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, - 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, - 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, - 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, - 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, - 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, - 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, - 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, - 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61, - 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, - 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, - 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, - 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, - 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, - 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, - 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, - 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, - 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, - 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, - 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, - 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, - 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, - 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, - 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 
0x90, 0x07, 0xb0, - 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, - 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, - 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, - 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, - 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, - 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, - 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15, - 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, - 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, - 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, - 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, - 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, - 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, - 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, - 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, - 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, - 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, - 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, - 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, - 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, - 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, - 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, - 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, - 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, - 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, - 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, - 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 0xeb, 0x4a, 0xee, - 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, - 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, - 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, - 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, - 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, - 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, - 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, - 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, - 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, - 0xb8, 
0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, - 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, - 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, - 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, - 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, - 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, - 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, - 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, - 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, - 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, - 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, - 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, - 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, - 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, - 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, - 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, - 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, - 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, - 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, - 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, - 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, - 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, - 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, - 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, - 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, - 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, - 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, - 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, - 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, - 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, - 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, - 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, - 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, - 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, - 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, - 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, - 0xd0, 0x41, 0x7e, 0x09, 0x24, 
0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, - 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, - 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, - 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, - 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, - 0x22, 0xc4, 0x11, 0x00, 0x00, -} - -func (m *Metrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemoryOomControl != nil { - { - size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.CgroupStats != nil { - { - size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.Network) > 0 { - for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Rdma != nil { - { - size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Blkio != nil { - { - size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.CPU != nil { - { - size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Pids != nil { - { - size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Hugetlb) > 0 { - for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HugetlbStat) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Pagesize) > 0 { - i -= len(m.Pagesize) - copy(dAtA[i:], m.Pagesize) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) - i-- - dAtA[i] = 0x22 - } - if m.Failcnt != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - i-- - dAtA[i] = 0x18 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *PidsStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x10 - } - if m.Current != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CPUStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Throttling != nil { - { - size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Usage != nil { - { - size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CPUUsage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PerCPU) > 0 { - dAtA11 := make([]byte, len(m.PerCPU)*10) - var j10 int - for _, num := range m.PerCPU { - for num >= 1<<7 { - dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j10++ - } - dAtA11[j10] = uint8(num) - j10++ - } - i -= j10 - copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintMetrics(dAtA, i, uint64(j10)) - i-- - dAtA[i] = 0x22 - } - if m.User != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.User)) - i-- - dAtA[i] = 0x18 - } - if 
m.Kernel != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) - i-- - dAtA[i] = 0x10 - } - if m.Total != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Throttle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ThrottledTime != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) - i-- - dAtA[i] = 0x18 - } - if m.ThrottledPeriods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) - i-- - dAtA[i] = 0x10 - } - if m.Periods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.KernelTCP != nil { - { - size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.Kernel != nil { - { - size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if m.Swap != nil { - { - size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - if m.Usage != nil { - { - size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - if m.TotalUnevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x80 - } - if m.TotalActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf8 - } - if m.TotalInactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf0 - } - if m.TotalActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe8 - } - if m.TotalInactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe0 - } - if m.TotalPgMajFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd8 - } - if m.TotalPgFault != 0 { 
- i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd0 - } - if m.TotalPgPgOut != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc8 - } - if m.TotalPgPgIn != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - if m.TotalWriteback != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if m.TotalDirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.TotalMappedFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if m.TotalRSSHuge != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa0 - } - if m.TotalRSS != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x98 - } - if m.TotalCache != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - } - if m.HierarchicalSwapLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - if m.HierarchicalMemoryLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.Unevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) - i-- - dAtA[i] = 0x78 - } - if m.ActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) - i-- - dAtA[i] = 0x70 - } - if m.InactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) - i-- - dAtA[i] = 0x68 - } - if m.ActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) - i-- - dAtA[i] = 0x60 - } - if m.InactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) - i-- - dAtA[i] = 0x58 - } - if m.PgMajFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) - i-- - dAtA[i] = 0x50 - } - if m.PgFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) - i-- - dAtA[i] = 0x48 - } - if m.PgPgOut != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) - i-- - dAtA[i] = 0x40 - } - if m.PgPgIn != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) - i-- - dAtA[i] = 0x38 - } - if m.Writeback != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) - i-- - dAtA[i] = 0x30 - } - if m.Dirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) - i-- - dAtA[i] = 0x28 - } - if m.MappedFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) - i-- - dAtA[i] = 0x20 - } - if m.RSSHuge != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) - i-- - dAtA[i] = 0x18 - } - if m.RSS != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) - i-- - dAtA[i] = 0x10 - } - if m.Cache != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i 
- var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Failcnt != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - i-- - dAtA[i] = 0x20 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x18 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x10 - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.OomKill != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) - i-- - dAtA[i] = 0x18 - } - if m.UnderOom != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) - i-- - dAtA[i] = 0x10 - } - if m.OomKillDisable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SectorsRecursive) > 0 { - for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.IoTimeRecursive) > 0 { - for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.IoMergedRecursive) > 0 { - for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } 
- i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.IoQueuedRecursive) > 0 { - for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.IoServicedRecursive) > 0 { - for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.IoServiceBytesRecursive) > 0 { - for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x28 - } - if m.Minor != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) - i-- - dAtA[i] = 0x20 - } - if m.Major != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) - i-- - dAtA[i] = 0x18 - } - if len(m.Device) > 0 { - i -= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0x12 - } - if len(m.Op) > 0 { - i -= len(m.Op) - copy(dAtA[i:], m.Op) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RdmaStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Limit) > 0 { - for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Current) > 0 { - for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, 
size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.HcaObjects != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) - i-- - dAtA[i] = 0x18 - } - if m.HcaHandles != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) - i-- - dAtA[i] = 0x10 - } - if len(m.Device) > 0 { - i -= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NetworkStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TxDropped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) - i-- - dAtA[i] = 0x48 - } - if m.TxErrors != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) - i-- - dAtA[i] = 0x40 - } - if m.TxPackets != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) - i-- - dAtA[i] = 0x38 - } - if m.TxBytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) - i-- - dAtA[i] = 0x30 - } - if m.RxDropped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) - i-- - dAtA[i] = 0x28 - } - if m.RxErrors != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) - i-- - dAtA[i] = 0x20 - } - if m.RxPackets != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) - i-- - dAtA[i] = 0x18 - } - if m.RxBytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CgroupStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.NrIoWait != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) - i-- - dAtA[i] = 0x28 - } - if m.NrUninterruptible != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) - i-- - dAtA[i] = 0x20 - } - if m.NrStopped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) - i-- - dAtA[i] = 0x18 - } - if m.NrRunning != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) - i-- - dAtA[i] = 0x10 - } - if m.NrSleeping != 0 { - i = encodeVarintMetrics(dAtA, 
i, uint64(m.NrSleeping)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Metrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hugetlb) > 0 { - for _, e := range m.Hugetlb { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Pids != nil { - l = m.Pids.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.CPU != nil { - l = m.CPU.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Blkio != nil { - l = m.Blkio.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Rdma != nil { - l = m.Rdma.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if len(m.Network) > 0 { - for _, e := range m.Network { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.CgroupStats != nil { - l = m.CgroupStats.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.MemoryOomControl != nil { - l = m.MemoryOomControl.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HugetlbStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - l = len(m.Pagesize) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Current != 0 { - n += 1 + sovMetrics(uint64(m.Current)) - } - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Usage != nil { - l = m.Usage.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Throttling != nil { - l = m.Throttling.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUUsage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Total != 0 { - n += 1 + sovMetrics(uint64(m.Total)) - } - if m.Kernel != 0 { - n += 1 + sovMetrics(uint64(m.Kernel)) - } - if m.User != 0 { - n += 1 + sovMetrics(uint64(m.User)) - } - if len(m.PerCPU) > 0 { - l = 0 - for _, e := range m.PerCPU { - l += sovMetrics(uint64(e)) - } - n += 1 + sovMetrics(uint64(l)) + l - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Throttle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Periods != 0 { - n += 1 + sovMetrics(uint64(m.Periods)) - } - if m.ThrottledPeriods != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) - } - if m.ThrottledTime != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledTime)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Cache != 0 { - n += 1 + sovMetrics(uint64(m.Cache)) - } - if m.RSS != 0 { - n += 1 + sovMetrics(uint64(m.RSS)) - } - if 
m.RSSHuge != 0 { - n += 1 + sovMetrics(uint64(m.RSSHuge)) - } - if m.MappedFile != 0 { - n += 1 + sovMetrics(uint64(m.MappedFile)) - } - if m.Dirty != 0 { - n += 1 + sovMetrics(uint64(m.Dirty)) - } - if m.Writeback != 0 { - n += 1 + sovMetrics(uint64(m.Writeback)) - } - if m.PgPgIn != 0 { - n += 1 + sovMetrics(uint64(m.PgPgIn)) - } - if m.PgPgOut != 0 { - n += 1 + sovMetrics(uint64(m.PgPgOut)) - } - if m.PgFault != 0 { - n += 1 + sovMetrics(uint64(m.PgFault)) - } - if m.PgMajFault != 0 { - n += 1 + sovMetrics(uint64(m.PgMajFault)) - } - if m.InactiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.InactiveAnon)) - } - if m.ActiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - n += 1 + sovMetrics(uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - n += 1 + sovMetrics(uint64(m.ActiveFile)) - } - if m.Unevictable != 0 { - n += 1 + sovMetrics(uint64(m.Unevictable)) - } - if m.HierarchicalMemoryLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) - } - if m.HierarchicalSwapLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) - } - if m.TotalCache != 0 { - n += 2 + sovMetrics(uint64(m.TotalCache)) - } - if m.TotalRSS != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSS)) - } - if m.TotalRSSHuge != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) - } - if m.TotalMappedFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalMappedFile)) - } - if m.TotalDirty != 0 { - n += 2 + sovMetrics(uint64(m.TotalDirty)) - } - if m.TotalWriteback != 0 { - n += 2 + sovMetrics(uint64(m.TotalWriteback)) - } - if m.TotalPgPgIn != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) - } - if m.TotalPgPgOut != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) - } - if m.TotalPgFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgFault)) - } - if m.TotalPgMajFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) - } - if m.TotalInactiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) - } - if m.TotalActiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) - } - if m.TotalInactiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) - } - if m.TotalActiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveFile)) - } - if m.TotalUnevictable != 0 { - n += 2 + sovMetrics(uint64(m.TotalUnevictable)) - } - if m.Usage != nil { - l = m.Usage.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Swap != nil { - l = m.Swap.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Kernel != nil { - l = m.Kernel.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.KernelTCP != nil { - l = m.KernelTCP.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryOomControl) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OomKillDisable != 0 { - n += 1 + sovMetrics(uint64(m.OomKillDisable)) - } - if m.UnderOom != 0 { - n += 1 + sovMetrics(uint64(m.UnderOom)) - } - if m.OomKill != 0 { - n += 1 + sovMetrics(uint64(m.OomKill)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - 
-func (m *BlkIOStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.IoServiceBytesRecursive) > 0 { - for _, e := range m.IoServiceBytesRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServicedRecursive) > 0 { - for _, e := range m.IoServicedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoQueuedRecursive) > 0 { - for _, e := range m.IoQueuedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for _, e := range m.IoServiceTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for _, e := range m.IoWaitTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoMergedRecursive) > 0 { - for _, e := range m.IoMergedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoTimeRecursive) > 0 { - for _, e := range m.IoTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.SectorsRecursive) > 0 { - for _, e := range m.SectorsRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BlkIOEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Op) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Major != 0 { - n += 1 + sovMetrics(uint64(m.Major)) - } - if m.Minor != 0 { - n += 1 + sovMetrics(uint64(m.Minor)) - } - if m.Value != 0 { - n += 1 + sovMetrics(uint64(m.Value)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Current) > 0 { - for _, e := range m.Current { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Limit) > 0 { - for _, e := range m.Limit { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.HcaHandles != 0 { - n += 1 + sovMetrics(uint64(m.HcaHandles)) - } - if m.HcaObjects != 0 { - n += 1 + sovMetrics(uint64(m.HcaObjects)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NetworkStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.RxBytes != 0 { - n += 1 + sovMetrics(uint64(m.RxBytes)) - } - if m.RxPackets != 0 { - n += 1 + sovMetrics(uint64(m.RxPackets)) - } - if m.RxErrors != 0 { - n += 1 + sovMetrics(uint64(m.RxErrors)) - } - if m.RxDropped != 0 { - n += 1 + sovMetrics(uint64(m.RxDropped)) - } - if m.TxBytes != 0 { - n += 1 + sovMetrics(uint64(m.TxBytes)) - } - if m.TxPackets != 0 { - n += 1 + sovMetrics(uint64(m.TxPackets)) - } - if m.TxErrors != 0 { - n += 1 + sovMetrics(uint64(m.TxErrors)) - } - if m.TxDropped != 0 { - n += 1 + sovMetrics(uint64(m.TxDropped)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CgroupStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NrSleeping != 0 { - n += 1 + 
sovMetrics(uint64(m.NrSleeping)) - } - if m.NrRunning != 0 { - n += 1 + sovMetrics(uint64(m.NrRunning)) - } - if m.NrStopped != 0 { - n += 1 + sovMetrics(uint64(m.NrStopped)) - } - if m.NrUninterruptible != 0 { - n += 1 + sovMetrics(uint64(m.NrUninterruptible)) - } - if m.NrIoWait != 0 { - n += 1 + sovMetrics(uint64(m.NrIoWait)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metrics) String() string { - if this == nil { - return "nil" - } - repeatedStringForHugetlb := "[]*HugetlbStat{" - for _, f := range this.Hugetlb { - repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," - } - repeatedStringForHugetlb += "}" - repeatedStringForNetwork := "[]*NetworkStat{" - for _, f := range this.Network { - repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," - } - repeatedStringForNetwork += "}" - s := strings.Join([]string{`&Metrics{`, - `Hugetlb:` + repeatedStringForHugetlb + `,`, - `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, - `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, - `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, - `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, - `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, - `Network:` + repeatedStringForNetwork + `,`, - `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, - `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *HugetlbStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HugetlbStat{`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsStat{`, - `Current:` + fmt.Sprintf("%v", this.Current) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUStat{`, - `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, - `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUUsage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUUsage{`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + 
`,`, - `}`, - }, "") - return s -} -func (this *Throttle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Throttle{`, - `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, - `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, - `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryStat{`, - `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, - `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, - `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, - `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, - `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, - `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, - `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, - `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, - `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, - `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, - `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, - `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, - `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, - `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, - `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, - `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, - `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, - `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, - `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, - `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, - `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, - `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, - `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, - `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, - `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, - `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, - `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, - `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, - `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, - `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, - `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, - `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, - `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryEntry{`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryOomControl) String() string { - if this == 
nil { - return "nil" - } - s := strings.Join([]string{`&MemoryOomControl{`, - `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, - `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, - `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServiceBytesRecursive { - repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServiceBytesRecursive += "}" - repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServicedRecursive { - repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServicedRecursive += "}" - repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoQueuedRecursive { - repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoQueuedRecursive += "}" - repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServiceTimeRecursive { - repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServiceTimeRecursive += "}" - repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoWaitTimeRecursive { - repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoWaitTimeRecursive += "}" - repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoMergedRecursive { - repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoMergedRecursive += "}" - repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoTimeRecursive { - repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoTimeRecursive += "}" - repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" - for _, f := range this.SectorsRecursive { - repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForSectorsRecursive += "}" - s := strings.Join([]string{`&BlkIOStat{`, - `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`, - `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, - `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, - `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, - `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, - `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, - `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, - `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BlkIOEntry{`, - `Op:` + fmt.Sprintf("%v", this.Op) + `,`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `Major:` + fmt.Sprintf("%v", this.Major) + `,`, - `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, - 
`Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForCurrent := "[]*RdmaEntry{" - for _, f := range this.Current { - repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForCurrent += "}" - repeatedStringForLimit := "[]*RdmaEntry{" - for _, f := range this.Limit { - repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForLimit += "}" - s := strings.Join([]string{`&RdmaStat{`, - `Current:` + repeatedStringForCurrent + `,`, - `Limit:` + repeatedStringForLimit + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RdmaEntry{`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, - `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkStat{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, - `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, - `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, - `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, - `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, - `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, - `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, - `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CgroupStats) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CgroupStats{`, - `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, - `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, - `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, - `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, - `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Metrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) - if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pids == nil { - m.Pids = &PidsStat{} - } - if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPUStat{} - } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &MemoryStat{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Blkio == nil { - m.Blkio = &BlkIOStat{} - } - if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) - } - 
var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Rdma == nil { - m.Rdma = &RdmaStat{} - } - if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Network = append(m.Network, &NetworkStat{}) - if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CgroupStats == nil { - m.CgroupStats = &CgroupStats{} - } - if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemoryOomControl == nil { - m.MemoryOomControl = &MemoryOomControl{} - } - if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HugetlbStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pagesize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &CPUUsage{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Throttling == nil { - m.Throttling = &Throttle{} - } - if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUUsage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - m.Kernel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kernel |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - m.User = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.User |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PerCPU) == 0 { - m.PerCPU = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Throttle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Throttle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) - } - m.Periods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Periods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) - } - m.ThrottledPeriods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledPeriods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) - } - m.ThrottledTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - m.Cache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Cache |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) - } - m.RSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) - } - m.RSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSSHuge |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) - } - m.MappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MappedFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) - } - m.Dirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Dirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) - } - m.Writeback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Writeback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) - } - m.PgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgIn |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) - } - m.PgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgOut |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) - } - m.PgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) - } - m.PgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgMajFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) - } - m.InactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) - } - m.ActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) - } - m.InactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) - } - m.ActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) - } - m.Unevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) - } - m.HierarchicalMemoryLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) - } - m.HierarchicalSwapLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) - } - m.TotalCache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCache |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 19: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) - } - m.TotalRSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) - } - m.TotalRSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSSHuge |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) - } - m.TotalMappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalMappedFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) - } - m.TotalDirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalDirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) - } - m.TotalWriteback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalWriteback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) - } - m.TotalPgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgIn |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) - } - m.TotalPgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgOut |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 26: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) - } - m.TotalPgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.TotalPgFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 27: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) - } - m.TotalPgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgMajFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 28: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) - } - m.TotalInactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) - } - m.TotalActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) - } - m.TotalInactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 31: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) - } - m.TotalActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) - } - m.TotalUnevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalUnevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &MemoryEntry{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Swap == nil { - m.Swap = &MemoryEntry{} - } - if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kernel == nil { - m.Kernel = &MemoryEntry{} - } - if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KernelTCP == nil { - m.KernelTCP = &MemoryEntry{} - } - if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) - } - m.OomKillDisable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKillDisable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) - } - m.UnderOom = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnderOom |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) - } - m.OomKill = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKill |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) - if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) - if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) - if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) - if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) - if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) - if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) - if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) - if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Op = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) - } - m.Major = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Major |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) - } - m.Minor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Minor |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Current = append(m.Current, &RdmaEntry{}) - if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limit = append(m.Limit, &RdmaEntry{}) - if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) - } - m.HcaHandles = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaHandles |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) - } - m.HcaObjects = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaObjects |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) - } - m.RxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) - } - m.RxPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) - } - m.RxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxErrors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) - } - m.RxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxDropped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) - } - m.TxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) - } - m.TxPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) - } - m.TxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxErrors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) - } - m.TxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxDropped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CgroupStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) - } - m.NrSleeping = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrSleeping |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) - } - m.NrRunning = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrRunning |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) - } - m.NrStopped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrStopped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) - } - m.NrUninterruptible = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrUninterruptible |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 
5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) - } - m.NrIoWait = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrIoWait |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt deleted file mode 100644 index e476cea6..00000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt +++ /dev/null @@ -1,790 +0,0 @@ -file { - name: "github.com/containerd/cgroups/stats/v1/metrics.proto" - package: "io.containerd.cgroups.v1" - dependency: "gogoproto/gogo.proto" - message_type { - name: "Metrics" - field { - name: "hugetlb" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.HugetlbStat" - json_name: "hugetlb" - } - field { - name: "pids" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.PidsStat" - json_name: "pids" - } - field { - name: "cpu" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.CPUStat" - options { - 65004: "CPU" - } - json_name: "cpu" - } - field { - name: "memory" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryStat" - json_name: "memory" - } - field { - name: 
"blkio" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOStat" - json_name: "blkio" - } - field { - name: "rdma" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.RdmaStat" - json_name: "rdma" - } - field { - name: "network" - number: 7 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.NetworkStat" - json_name: "network" - } - field { - name: "cgroup_stats" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.CgroupStats" - json_name: "cgroupStats" - } - field { - name: "memory_oom_control" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryOomControl" - json_name: "memoryOomControl" - } - } - message_type { - name: "HugetlbStat" - field { - name: "usage" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usage" - } - field { - name: "max" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "max" - } - field { - name: "failcnt" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "failcnt" - } - field { - name: "pagesize" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "pagesize" - } - } - message_type { - name: "PidsStat" - field { - name: "current" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "current" - } - field { - name: "limit" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "limit" - } - } - message_type { - name: "CPUStat" - field { - name: "usage" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.CPUUsage" - json_name: "usage" - } - field { - name: "throttling" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.Throttle" - json_name: "throttling" - } - } - message_type { - name: "CPUUsage" - field { - name: "total" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "total" - } - field { - name: "kernel" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "kernel" - } - field { - name: "user" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "user" - } - field { - name: "per_cpu" - number: 4 - label: LABEL_REPEATED - type: TYPE_UINT64 - options { - 65004: "PerCPU" - } - json_name: "perCpu" - } - } - message_type { - name: "Throttle" - field { - name: "periods" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "periods" - } - field { - name: "throttled_periods" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "throttledPeriods" - } - field { - name: "throttled_time" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "throttledTime" - } - } - message_type { - name: "MemoryStat" - field { - name: "cache" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "cache" - } - field { - name: "rss" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "RSS" - } - json_name: "rss" - } - field { - name: "rss_huge" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "RSSHuge" - } - json_name: "rssHuge" - } - field { - name: "mapped_file" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "mappedFile" - } - field { - name: "dirty" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "dirty" - } - field { - name: "writeback" - number: 6 - label: 
LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "writeback" - } - field { - name: "pg_pg_in" - number: 7 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgPgIn" - } - field { - name: "pg_pg_out" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgPgOut" - } - field { - name: "pg_fault" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgFault" - } - field { - name: "pg_maj_fault" - number: 10 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "pgMajFault" - } - field { - name: "inactive_anon" - number: 11 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "inactiveAnon" - } - field { - name: "active_anon" - number: 12 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "activeAnon" - } - field { - name: "inactive_file" - number: 13 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "inactiveFile" - } - field { - name: "active_file" - number: 14 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "activeFile" - } - field { - name: "unevictable" - number: 15 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "unevictable" - } - field { - name: "hierarchical_memory_limit" - number: 16 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "hierarchicalMemoryLimit" - } - field { - name: "hierarchical_swap_limit" - number: 17 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "hierarchicalSwapLimit" - } - field { - name: "total_cache" - number: 18 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalCache" - } - field { - name: "total_rss" - number: 19 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "TotalRSS" - } - json_name: "totalRss" - } - field { - name: "total_rss_huge" - number: 20 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - options { - 65004: "TotalRSSHuge" - } - json_name: "totalRssHuge" - } - field { - name: "total_mapped_file" - number: 21 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalMappedFile" - } - field { - name: "total_dirty" - number: 22 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalDirty" - } - field { - name: "total_writeback" - number: 23 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalWriteback" - } - field { - name: "total_pg_pg_in" - number: 24 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgPgIn" - } - field { - name: "total_pg_pg_out" - number: 25 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgPgOut" - } - field { - name: "total_pg_fault" - number: 26 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgFault" - } - field { - name: "total_pg_maj_fault" - number: 27 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalPgMajFault" - } - field { - name: "total_inactive_anon" - number: 28 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalInactiveAnon" - } - field { - name: "total_active_anon" - number: 29 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalActiveAnon" - } - field { - name: "total_inactive_file" - number: 30 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalInactiveFile" - } - field { - name: "total_active_file" - number: 31 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalActiveFile" - } - field { - name: "total_unevictable" - number: 32 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "totalUnevictable" - } - field { - name: "usage" - number: 33 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - json_name: "usage" - } - field 
{ - name: "swap" - number: 34 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - json_name: "swap" - } - field { - name: "kernel" - number: 35 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - json_name: "kernel" - } - field { - name: "kernel_tcp" - number: 36 - label: LABEL_OPTIONAL - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.MemoryEntry" - options { - 65004: "KernelTCP" - } - json_name: "kernelTcp" - } - } - message_type { - name: "MemoryEntry" - field { - name: "limit" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "limit" - } - field { - name: "usage" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "usage" - } - field { - name: "max" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "max" - } - field { - name: "failcnt" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "failcnt" - } - } - message_type { - name: "MemoryOomControl" - field { - name: "oom_kill_disable" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "oomKillDisable" - } - field { - name: "under_oom" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "underOom" - } - field { - name: "oom_kill" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "oomKill" - } - } - message_type { - name: "BlkIOStat" - field { - name: "io_service_bytes_recursive" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioServiceBytesRecursive" - } - field { - name: "io_serviced_recursive" - number: 2 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioServicedRecursive" - } - field { - name: "io_queued_recursive" - number: 3 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioQueuedRecursive" - } - field { - name: "io_service_time_recursive" - number: 4 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioServiceTimeRecursive" - } - field { - name: "io_wait_time_recursive" - number: 5 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioWaitTimeRecursive" - } - field { - name: "io_merged_recursive" - number: 6 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioMergedRecursive" - } - field { - name: "io_time_recursive" - number: 7 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "ioTimeRecursive" - } - field { - name: "sectors_recursive" - number: 8 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.BlkIOEntry" - json_name: "sectorsRecursive" - } - } - message_type { - name: "BlkIOEntry" - field { - name: "op" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "op" - } - field { - name: "device" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "device" - } - field { - name: "major" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "major" - } - field { - name: "minor" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "minor" - } - field { - name: "value" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "value" - } - } - message_type { - name: 
"RdmaStat" - field { - name: "current" - number: 1 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.RdmaEntry" - json_name: "current" - } - field { - name: "limit" - number: 2 - label: LABEL_REPEATED - type: TYPE_MESSAGE - type_name: ".io.containerd.cgroups.v1.RdmaEntry" - json_name: "limit" - } - } - message_type { - name: "RdmaEntry" - field { - name: "device" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "device" - } - field { - name: "hca_handles" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT32 - json_name: "hcaHandles" - } - field { - name: "hca_objects" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT32 - json_name: "hcaObjects" - } - } - message_type { - name: "NetworkStat" - field { - name: "name" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_STRING - json_name: "name" - } - field { - name: "rx_bytes" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxBytes" - } - field { - name: "rx_packets" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxPackets" - } - field { - name: "rx_errors" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxErrors" - } - field { - name: "rx_dropped" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "rxDropped" - } - field { - name: "tx_bytes" - number: 6 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txBytes" - } - field { - name: "tx_packets" - number: 7 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txPackets" - } - field { - name: "tx_errors" - number: 8 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txErrors" - } - field { - name: "tx_dropped" - number: 9 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "txDropped" - } - } - message_type { - name: "CgroupStats" - field { - name: "nr_sleeping" - number: 1 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrSleeping" - } - field { - name: "nr_running" - number: 2 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrRunning" - } - field { - name: "nr_stopped" - number: 3 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrStopped" - } - field { - name: "nr_uninterruptible" - number: 4 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrUninterruptible" - } - field { - name: "nr_io_wait" - number: 5 - label: LABEL_OPTIONAL - type: TYPE_UINT64 - json_name: "nrIoWait" - } - } - syntax: "proto3" -} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto deleted file mode 100644 index b3f6cc37..00000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto +++ /dev/null @@ -1,158 +0,0 @@ -syntax = "proto3"; - -package io.containerd.cgroups.v1; - -import "gogoproto/gogo.proto"; - -message Metrics { - repeated HugetlbStat hugetlb = 1; - PidsStat pids = 2; - CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; - MemoryStat memory = 4; - BlkIOStat blkio = 5; - RdmaStat rdma = 6; - repeated NetworkStat network = 7; - CgroupStats cgroup_stats = 8; - MemoryOomControl memory_oom_control = 9; -} - -message HugetlbStat { - uint64 usage = 1; - uint64 max = 2; - uint64 failcnt = 3; - string pagesize = 4; -} - -message PidsStat { - uint64 current = 1; - uint64 limit = 2; -} - -message CPUStat { - CPUUsage usage = 1; - Throttle throttling = 2; -} - -message CPUUsage { - // values in nanoseconds - uint64 total = 1; - uint64 kernel = 2; - uint64 user = 3; - repeated uint64 per_cpu = 4 [(gogoproto.customname) = 
"PerCPU"]; - -} - -message Throttle { - uint64 periods = 1; - uint64 throttled_periods = 2; - uint64 throttled_time = 3; -} - -message MemoryStat { - uint64 cache = 1; - uint64 rss = 2 [(gogoproto.customname) = "RSS"]; - uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; - uint64 mapped_file = 4; - uint64 dirty = 5; - uint64 writeback = 6; - uint64 pg_pg_in = 7; - uint64 pg_pg_out = 8; - uint64 pg_fault = 9; - uint64 pg_maj_fault = 10; - uint64 inactive_anon = 11; - uint64 active_anon = 12; - uint64 inactive_file = 13; - uint64 active_file = 14; - uint64 unevictable = 15; - uint64 hierarchical_memory_limit = 16; - uint64 hierarchical_swap_limit = 17; - uint64 total_cache = 18; - uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; - uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; - uint64 total_mapped_file = 21; - uint64 total_dirty = 22; - uint64 total_writeback = 23; - uint64 total_pg_pg_in = 24; - uint64 total_pg_pg_out = 25; - uint64 total_pg_fault = 26; - uint64 total_pg_maj_fault = 27; - uint64 total_inactive_anon = 28; - uint64 total_active_anon = 29; - uint64 total_inactive_file = 30; - uint64 total_active_file = 31; - uint64 total_unevictable = 32; - MemoryEntry usage = 33; - MemoryEntry swap = 34; - MemoryEntry kernel = 35; - MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; - -} - -message MemoryEntry { - uint64 limit = 1; - uint64 usage = 2; - uint64 max = 3; - uint64 failcnt = 4; -} - -message MemoryOomControl { - uint64 oom_kill_disable = 1; - uint64 under_oom = 2; - uint64 oom_kill = 3; -} - -message BlkIOStat { - repeated BlkIOEntry io_service_bytes_recursive = 1; - repeated BlkIOEntry io_serviced_recursive = 2; - repeated BlkIOEntry io_queued_recursive = 3; - repeated BlkIOEntry io_service_time_recursive = 4; - repeated BlkIOEntry io_wait_time_recursive = 5; - repeated BlkIOEntry io_merged_recursive = 6; - repeated BlkIOEntry io_time_recursive = 7; - repeated BlkIOEntry sectors_recursive = 8; -} - -message BlkIOEntry { - string op = 1; - string device = 2; - uint64 major = 3; - uint64 minor = 4; - uint64 value = 5; -} - -message RdmaStat { - repeated RdmaEntry current = 1; - repeated RdmaEntry limit = 2; -} - -message RdmaEntry { - string device = 1; - uint32 hca_handles = 2; - uint32 hca_objects = 3; -} - -message NetworkStat { - string name = 1; - uint64 rx_bytes = 2; - uint64 rx_packets = 3; - uint64 rx_errors = 4; - uint64 rx_dropped = 5; - uint64 tx_bytes = 6; - uint64 tx_packets = 7; - uint64 tx_errors = 8; - uint64 tx_dropped = 9; -} - -// CgroupStats exports per-cgroup statistics. -message CgroupStats { - // number of tasks sleeping - uint64 nr_sleeping = 1; - // number of tasks running - uint64 nr_running = 2; - // number of tasks in stopped state - uint64 nr_stopped = 3; - // number of tasks in uninterruptible state - uint64 nr_uninterruptible = 4; - // number of tasks waiting on IO - uint64 nr_io_wait = 5; -} diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go deleted file mode 100644 index 73a57013..00000000 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import "golang.org/x/sys/unix" - -// EpollCreate1 is an alias for unix.EpollCreate1 -// Deprecated: use golang.org/x/sys/unix.EpollCreate1 -var EpollCreate1 = unix.EpollCreate1 - -// EpollCtl is an alias for unix.EpollCtl -// Deprecated: use golang.org/x/sys/unix.EpollCtl -var EpollCtl = unix.EpollCtl - -// EpollWait is an alias for unix.EpollWait -// Deprecated: use golang.org/x/sys/unix.EpollWait -var EpollWait = unix.EpollWait diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go deleted file mode 100644 index a71a9cd7..00000000 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "os" - "path/filepath" - "strconv" -) - -// GetOpenFds returns the number of open fds for the process provided by pid -func GetOpenFds(pid int) (int, error) { - dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) - if err != nil { - return -1, err - } - return len(dirs), nil -} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go deleted file mode 100644 index 805a7a73..00000000 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import "os" - -// ForceRemoveAll on unix is just a wrapper for os.RemoveAll -func ForceRemoveAll(path string) error { - return os.RemoveAll(path) -} - -// MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems. 
-func MkdirAllWithACL(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go deleted file mode 100644 index 87ebacc2..00000000 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ /dev/null @@ -1,345 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim" - "golang.org/x/sys/windows" -) - -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) -} - -// MkdirAll implementation that is volume path aware for Windows. It can be used -// as a drop-in replacement for os.MkdirAll() -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], adminAndLocalSystem) - if err != nil { - return err - } - } - - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) - } else { - err = os.Mkdir(path, 0) - } - - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . 
-// -// This is a modified and combined version of os.Mkdir and windows.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. -func mkdirWithACL(name string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := windows.SecurityDescriptorFromString(SddlAdministratorsLocalSystem) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = sd - - namep, err := windows.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows -// and uses hcsshim to unmount and delete container layers contained therein, in the correct order, -// when passed a containerd root data directory (i.e. the `--root` directory for containerd). -func ForceRemoveAll(path string) error { - // snapshots/windows/windows.go init() - const snapshotPlugin = "io.containerd.snapshotter.v1" + "." 
+ "windows" - // snapshots/windows/windows.go NewSnapshotter() - snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") - if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { - if err := cleanupWCOWLayers(snapshotDir); err != nil { - return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) - } - } - - return os.RemoveAll(path) -} - -func cleanupWCOWLayers(root string) error { - // See snapshots/windows/windows.go getSnapshotDir() - var layerNums []int - var rmLayerNums []int - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if path != root && info.IsDir() { - name := filepath.Base(path) - if strings.HasPrefix(name, "rm-") { - layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) - if err != nil { - return err - } - rmLayerNums = append(rmLayerNums, layerNum) - } else { - layerNum, err := strconv.Atoi(name) - if err != nil { - return err - } - layerNums = append(layerNums, layerNum) - } - return filepath.SkipDir - } - - return nil - }); err != nil { - return err - } - - sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) - for _, rmLayerNum := range rmLayerNums { - if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { - return err - } - } - - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) - for _, layerNum := range layerNums { - if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { - return err - } - } - - return nil -} - -func cleanupWCOWLayer(layerPath string) error { - info := hcsshim.DriverInfo{ - HomeDir: filepath.Dir(layerPath), - } - - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. - // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. - if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { - return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) - } - } - - if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) - } - - if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to destroy %s: %w", layerPath, err) - } - - return nil -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go deleted file mode 100644 index bb2a3eaf..00000000 --- a/vendor/github.com/containerd/containerd/sys/oom_linux.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sys - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/containerd/containerd/pkg/userns" - "golang.org/x/sys/unix" -) - -const ( - // OOMScoreAdjMin is from OOM_SCORE_ADJ_MIN https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L9 - OOMScoreAdjMin = -1000 - // OOMScoreAdjMax is from OOM_SCORE_ADJ_MAX https://github.com/torvalds/linux/blob/v5.10/include/uapi/linux/oom.h#L10 - OOMScoreAdjMax = 1000 -) - -// AdjustOOMScore sets the oom score for the provided pid. If the provided score -// is out of range (-1000 - 1000), it is clipped to the min/max value. -func AdjustOOMScore(pid, score int) error { - if score > OOMScoreAdjMax { - score = OOMScoreAdjMax - } else if score < OOMScoreAdjMin { - score = OOMScoreAdjMin - } - return SetOOMScore(pid, score) -} - -// SetOOMScore sets the oom score for the provided pid -func SetOOMScore(pid, score int) error { - if score > OOMScoreAdjMax || score < OOMScoreAdjMin { - return fmt.Errorf("value out of range (%d): OOM score must be between %d and %d", score, OOMScoreAdjMin, OOMScoreAdjMax) - } - path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - f, err := os.OpenFile(path, os.O_WRONLY, 0) - if err != nil { - return err - } - defer f.Close() - if _, err = f.WriteString(strconv.Itoa(score)); err != nil { - if os.IsPermission(err) && (!runningPrivileged() || userns.RunningInUserNS()) { - return nil - } - return err - } - return nil -} - -// GetOOMScoreAdj gets the oom score for a process. It returns 0 (zero) if either -// no oom score is set, or a sore is set to 0. -func GetOOMScoreAdj(pid int) (int, error) { - path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.Atoi(strings.TrimSpace(string(data))) -} - -// runningPrivileged returns true if the effective user ID of the -// calling process is 0 -func runningPrivileged() bool { - return unix.Geteuid() == 0 -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go deleted file mode 100644 index fa0db5a1..00000000 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !linux -// +build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -const ( - // OOMScoreMaxKillable is not implemented on non Linux - OOMScoreMaxKillable = 0 - // OOMScoreAdjMax is not implemented on non Linux - OOMScoreAdjMax = 0 -) - -// AdjustOOMScore sets the oom score for the provided pid. If the provided score -// is out of range (-1000 - 1000), it is clipped to the min/max value. 
-// -// Not implemented on Windows -func AdjustOOMScore(pid, score int) error { - return nil -} - -// SetOOMScore sets the oom score for the process -// -// Not implemented on Windows -func SetOOMScore(pid, score int) error { - return nil -} - -// GetOOMScoreAdj gets the oom score for a process -// -// Not implemented on Windows -func GetOOMScoreAdj(pid int) (int, error) { - return 0, nil -} diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go deleted file mode 100644 index 367e19ca..00000000 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !windows -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "fmt" - "net" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// CreateUnixSocket creates a unix socket and returns the listener -func CreateUnixSocket(path string) (net.Listener, error) { - // BSDs have a 104 limit - if len(path) > 104 { - return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) - } - if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { - return nil, err - } - if err := unix.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - return net.Listen("unix", path) -} - -// GetLocalListener returns a listener out of a unix socket. -func GetLocalListener(path string, uid, gid int) (net.Listener, error) { - // Ensure parent directory is created - if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil { - return nil, err - } - - l, err := CreateUnixSocket(path) - if err != nil { - return l, err - } - - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - - if err := os.Chown(path, uid, gid); err != nil { - l.Close() - return nil, err - } - - return l, nil -} - -func mkdirAs(path string, uid, gid int) error { - if _, err := os.Stat(path); !os.IsNotExist(err) { - return err - } - - if err := os.MkdirAll(path, 0770); err != nil { - return err - } - - return os.Chown(path, uid, gid) -} diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go deleted file mode 100644 index 1ae12bc5..00000000 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package sys - -import ( - "net" - - "github.com/Microsoft/go-winio" -) - -// GetLocalListener returns a Listernet out of a named pipe. -// `path` must be of the form of `\\.\pipe\` -// (see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150) -func GetLocalListener(path string, uid, gid int) (net.Listener, error) { - return winio.ListenPipe(path, nil) -} diff --git a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go b/vendor/github.com/containerd/containerd/sys/userns_deprecated.go deleted file mode 100644 index 53acf554..00000000 --- a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import "github.com/containerd/containerd/pkg/userns" - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead. -var RunningInUserNS = userns.RunningInUserNS diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index dffacff1..0728bfe1 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -1,5 +1,6 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. Aanand Prasad Aaron Davidson @@ -7,16 +8,17 @@ Aaron Feng Aaron Hnatiw Aaron Huslage Aaron L. 
Xu -Aaron Lehmann +Aaron Lehmann Aaron Welch -Aaron.L.Xu Abel Muiño Abhijeet Kasurde -Abhinandan Prativadi +Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda Abhishek Sharma Abin Shahab +Abirdcfly +Ada Mancini Adam Avilla Adam Dobrawy Adam Eijdenberg @@ -26,6 +28,7 @@ Adam Mills Adam Pointer Adam Singer Adam Walz +Adam Williams Addam Hardy Aditi Rajagopal Aditya @@ -51,6 +54,7 @@ Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle +Akshay Moghe Al Tobey alambike Alan Hoyle @@ -58,9 +62,11 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang -Albin Kerouanton +Albin Kerouanton +Alec Benson Alejandro González Hevia Aleksa Sarai +Aleksandr Chebotov Aleksandrs Fadins Alena Prokharchyk Alessandro Boch @@ -72,6 +78,7 @@ Alex Crawford Alex Ellis Alex Gaynor Alex Goodman +Alex Nordlund Alex Olshansky Alex Samorukov Alex Warhawk @@ -79,7 +86,8 @@ Alexander Artemenko Alexander Boyd Alexander Larsson Alexander Midlash -Alexander Morozov +Alexander Morozov +Alexander Polakov Alexander Shopov Alexandre Beslic Alexandre Garnier @@ -90,7 +98,8 @@ Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin -Alexis THOMAS +Alexis Ries +Alexis Thomas Alfred Landrum Ali Dehghani Alicia Lauerman @@ -103,6 +112,7 @@ Alvin Deng Alvin Richards amangoel Amen Belayneh +Ameya Gawde Amir Goldstein Amit Bakshi Amit Krishnan @@ -126,6 +136,7 @@ Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan +Andrei Ushakov Andrei Vagin Andrew C. Bodine Andrew Clay Shafer @@ -135,6 +146,7 @@ Andrew Gerrand Andrew Guenther Andrew He Andrew Hsu +Andrew Kim Andrew Kuklewicz Andrew Macgregor Andrew Macpherson @@ -150,15 +162,17 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins -andy Andy Chambers andy diller Andy Goldstein Andy Kipp +Andy Lindeman Andy Rothfusz Andy Smith Andy Wilson +Andy Zhang Anes Hasicic +Angel Velazquez Anil Belur Anil Madhavapeddy Ankit Jain @@ -179,20 +193,24 @@ Antonio Murdaca Antonis Kalipetis Antony Messerli Anuj Bahuguna +Anuj Varma Anusha Ragunathan +Anyu Wang apocas Arash Deshmeh ArikaChen -Arko Dasgupta +Arko Dasgupta Arnaud Lefebvre -Arnaud Porterie +Arnaud Porterie Arnaud Rebillout +Artem Khramov Arthur Barr Arthur Gautier Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -200,17 +218,21 @@ Avi Miller Avi Vaid ayoshitake Azat Khuyiyakhmetov +Bao Yonglei Bardia Keyoumarsi Barnaby Gray Barry Allard Bartłomiej Piotrowski Bastiaan Bakker +Bastien Pascard bdevloed +Bearice Ren Ben Bonnefoy Ben Firshman Ben Golub Ben Gould Ben Hall +Ben Langfeld Ben Sargent Ben Severson Ben Toews @@ -218,6 +240,7 @@ Ben Wiklund Benjamin Atkin Benjamin Baker Benjamin Boudreau +Benjamin Böhmke Benjamin Yolken Benny Ng Benoit Chesneau @@ -231,12 +254,15 @@ Bhiraj Butala Bhumika Bayani Bilal Amarni Bill Wang +Billy Ridgway Bily Zhang Bin Liu Bingshen Wang +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott +Bojun Zhu Boqin Qin Boris Pruessmann Boshi Lian @@ -252,6 +278,7 @@ Brendan Dixon Brent Salisbury Brett Higgins Brett Kochendorfer +Brett Milford Brett Randall Brian (bex) Exelbierd Brian Bland @@ -282,6 +309,7 @@ Byung Kang Caleb Spare Calen Pennington Cameron Boehmer +Cameron Sparr Cameron Spear Campbell Allen Candid Dauth @@ -316,6 +344,7 @@ Charlie Drage Charlie Lewis Chase Bolt ChaYoung You +Chee Hau Lim Chen Chao Chen Chuanliang Chen Hanxiao @@ -325,6 +354,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chenyang Yan chenyuzhu Chetan Birajdar Chewey @@ -339,6 +369,7 @@ Chris Fordham Chris Gavin Chris 
Gibson Chris Khoo +Chris Kreussling (Flatbush Gardener) Chris McKinnel Chris McKinnel Chris Price @@ -351,6 +382,7 @@ Chris Telfer Chris Wahl Chris Weyl Chris White +Christian Becker Christian Berendt Christian Brauner Christian Böhme @@ -359,6 +391,7 @@ Christian Persson Christian Rotzoll Christian Simon Christian Stefanescu +Christoph Ziebuhr Christophe Mehay Christophe Troestler Christophe Vidal @@ -372,7 +405,9 @@ Christy Norman Chun Chen Ciro S. Costa Clayton Coleman +Clint Armstrong Clinton Kitson +clubby789 Cody Roseborough Coenraad Loubser Colin Dunklau @@ -383,19 +418,23 @@ Colin Walters Collin Guarino Colm Hally companycy +Conor Evans Corbin Coleman Corey Farrell Cory Forsyth +Cory Snider cressie176 -CrimsonGlory Cristian Ariza Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian CUI Wei +cuishuang +Cuong Manh Le Cyprian Gracz Cyril F +Da McGrady Daan van Berkel Daehyeok Mun Dafydd Crosby @@ -413,6 +452,7 @@ Dan Hirsch Dan Keder Dan Levy Dan McPherson +Dan Plamadeala Dan Stine Dan Williams Dani Hodovic @@ -433,6 +473,7 @@ Daniel Mizyrycki Daniel Nephin Daniel Norberg Daniel Nordberg +Daniel P. Berrangé Daniel Robinson Daniel S Daniel Sweet @@ -441,6 +482,7 @@ Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang +Daniele Rondina Danny Berger Danny Milosavljevic Danny Yates @@ -456,6 +498,7 @@ Dave Henderson Dave MacDonald Dave Tucker David Anderson +David Bellotti David Calavera David Chung David Corking @@ -470,9 +513,11 @@ David Lawrence David Lechner David M. Karr David Mackey +David Manouchehri David Mat David Mcanulty David McKay +David O'Rourke David P Hilton David Pelaez David R. Jenni @@ -503,14 +548,14 @@ Dennis Docter Derek Derek Derek Ch -Derek McGowan +Derek McGowan Deric Crago Deshi Xiao -devmeyster Devon Estes Devvyn Murphy Dharmit Shah Dhawal Yogesh Bhanushali +Dhilip Kumars Diego Romero Diego Siqueira Dieter Reuter @@ -522,9 +567,11 @@ Dimitris Rozakis Dimitry Andric Dinesh Subhraveti Ding Fei +dingwei Diogo Monica DiuDiugirl Djibril Koné +Djordje Lukic dkumor Dmitri Logvinenko Dmitri Shuralyov @@ -536,6 +583,8 @@ Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] Dolph Mathews Dominic Tubach Dominic Yin @@ -569,8 +618,9 @@ Eivind Uggedal Elan Ruusamäe Elango Sivanandam Elena Morozova -Eli Uriegas +Eli Uriegas Elias Faxö +Elias Koromilas Elias Probst Elijah Zupancic eluck @@ -580,6 +630,7 @@ Emil Hernvall Emily Maier Emily Rose Emir Ozer +Eng Zer Jun Enguerran Eohyung Lee epeterso @@ -588,6 +639,7 @@ Eric Curtin Eric G. Noriega Eric Hanchrow Eric Lee +Eric Mountain Eric Myhre Eric Paris Eric Rafaloff @@ -597,17 +649,21 @@ Eric Soderstrom Eric Yang Eric-Olivier Lamey Erica Windisch +Erich Cordoba Erik Bray Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen +Erik Sipsma Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh +Espen Suenson Ethan Bell Ethan Mosbaugh +Euan Harris Euan Kemp Eugen Krizo Eugene Yakubovich @@ -657,6 +713,7 @@ Fengtu Wang Ferenc Szabo Fernando Fero Volar +Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira @@ -673,6 +730,7 @@ Florin Patan fonglh Foysal Iqbal Francesc Campoy +Francesco Degrassi Francesco Mari Francis Chuang Francisco Carriedo @@ -681,18 +739,23 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -frankyang +Frank Yang Fred Lifton Frederick F. Kautz IV +Frederico F. 
de Oliveira Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy +Gabriel Goller +Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda @@ -707,12 +770,14 @@ Gaurav Singh Gaël PORTAY Genki Takiuchi GennadySpb +Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze George MacRorie George Xie Georgi Hristozov +Georgy Yakovlev Gereon Frey German DZ Gert van Valkenhoef @@ -724,6 +789,7 @@ Gildas Cuisinier Giovan Isa Musthofa gissehel Giuseppe Mazzotta +Giuseppe Scrivano Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington @@ -746,6 +812,8 @@ Guilhem Lettron Guilherme Salgado Guillaume Dufour Guillaume J. Charmes +Gunadhya S. <6939749+gunadhya@users.noreply.github.com> +Guoqiang QI guoxiuyan Guri Gurjeet Singh @@ -755,12 +823,13 @@ gwx296173 Günter Zöchbauer Haichao Yang haikuoliu +haining.cao Hakan Özler Hamish Hutchings Hannes Ljungberg Hans Kristian Flaatten Hans Rødtang -Hao Shu Wei +Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers Harald Niesche @@ -792,15 +861,16 @@ Hu Tao HuanHuan Ye Huanzhong Zhang Huayi Zhang +Hugo Barrera Hugo Duncan Hugo Marisco <0x6875676f@gmail.com> +Hui Kang Hunter Blanks huqun Huu Nguyen -hyeongkyu.lee +Hyeongkyu Lee Hyzhou Zhy Iago López Galeiras -Ian Babrou Ian Bishop Ian Bull Ian Calvert @@ -817,6 +887,7 @@ Igor Dolzhikov Igor Karpovich Iliana Weller Ilkka Laukkanen +Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov @@ -847,7 +918,8 @@ Jaivish Kothari Jake Champlin Jake Moshenko Jake Sanders -jakedt +Jakub Drahos +Jakub Guzik James Allen James Carey James Carr @@ -859,11 +931,14 @@ James Lal James Mills James Nesbitt James Nugent +James Sanders James Turnbull James Watkins-Harvey Jamie Hannaford Jamshid Afshar +Jan Breig Jan Chren +Jan Götte Jan Keromnes Jan Koprowski Jan Pazdziora @@ -876,7 +951,6 @@ Januar Wayong Jared Biel Jared Hocutt Jaroslaw Zabiello -jaseg Jasmine Hegman Jason A. Donenfeld Jason Divock @@ -891,10 +965,11 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Javier Bassi jaxgeller -Jay Jay Jay Kamat +Jay Lim Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido @@ -912,12 +987,14 @@ Jeff Minard Jeff Nickoloff Jeff Silberman Jeff Welch +Jeff Zvier Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske Jeremy Chambers Jeremy Grosser +Jeremy Huntwork Jeremy Price Jeremy Qian Jeremy Unruh @@ -933,13 +1010,16 @@ Ji.Zhilong Jian Liao Jian Zhang Jiang Jinyang +Jianyong Wu Jie Luo Jie Ma Jihyun Hwang Jilles Oldenbeuving Jim Alateras +Jim Carroll Jim Ehrismann Jim Galasyn +Jim Lin Jim Minter Jim Perrin Jimmy Cuadra @@ -951,6 +1031,7 @@ Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +Joakim Roubert Joao Fernandes Joao Trindade Joe Beda @@ -1012,6 +1093,7 @@ Joost Cassee Jordan Arentsen Jordan Jennings Jordan Sissel +Jordi Massaguer Pla Jorge Marin Jorit Kleine-Möllhoff Jose Diaz-Gonzalez @@ -1044,12 +1126,15 @@ Julien Pervillé Julien Pivotto Julio Guerra Julio Montes +Jun Du Jun-Ru Chang +junxu Jussi Nummelin Justas Brazauskas Justen Martin Justin Cormack Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> Justin Menga Justin Plock Justin Simonelis @@ -1062,6 +1147,7 @@ Jörg Thalheim K. 
Heller Kai Blin Kai Qiang Wu (Kennan) +Kaijie Chen Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1082,6 +1168,7 @@ Kawsar Saiyeed Kay Yan kayrus Kazuhiro Sera +Kazuyoshi Kato Ke Li Ke Xu Kei Ohmura @@ -1096,6 +1183,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1122,20 +1210,22 @@ knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine +Konrad Ponichtera Konstantin Gribov Konstantin L Konstantin Pelykh +Kostadin Plachkov Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova Krystian Wojcicki -Kun Zhang Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden +Kyle Squizzato Kyle Wuolle kyu Lachlan Coote @@ -1151,20 +1241,25 @@ Lars R. Damerow Lars-Magnus Skog Laszlo Meszaros Laura Frank +Laurent Bernaille Laurent Erignoux Laurie Voss Leandro Siqueira +Lee Calcote Lee Chao <932819864@qq.com> Lee, Meng-Han -leeplay Lei Gong Lei Jitang +Leiiwang Len Weincier Lennie Leo Gallucci +Leonardo Nodari +Leonardo Taccari Leszek Kowalski Levi Blackstone Levi Gross +Levi Harrison Lewis Daly Lewis Marshall Lewis Peckover @@ -1173,11 +1268,12 @@ Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh +liangwei Liao Qingwei Lifubang Lihua Tang Lily Guo -limsy +limeidan Lin Lu LingFaKe Linus Heckemann @@ -1207,6 +1303,7 @@ Lucas Chi Lucas Molas Lucas Silvestre Luciano Mores +Luis Henrique Mulinari Luis Martínez de Bartolomé Izquierdo Luiz Svoboda Lukas Heeren @@ -1222,7 +1319,7 @@ Ma Shimiao Mabin Madhan Raj Mookkandy Madhav Puri -Madhu Venugopal +Madhu Venugopal Mageee Mahesh Tiyyagura malnick @@ -1255,12 +1352,14 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen +Mark Feit Mark Jeromin Mark McGranaghan Mark McKinstry Mark Milstein Mark Oates Mark Parker +Mark Vainomaa Mark West Markan Patel Marko Mikulicic @@ -1269,11 +1368,14 @@ Markus Fix Markus Kortlang Martijn Dwars Martijn van Oosterhout +Martin Braun +Martin Dojcak Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen Martin Muzatko Martin Redmond +Maru Newby Mary Anthony Masahito Zembutsu Masato Ohba @@ -1284,13 +1386,16 @@ Mathias Monnerville Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent +Mathieu Paturel Matt Apperson Matt Bachmann +Matt Bajor Matt Bentley Matt Haggard Matt Hoyle Matt McCormick Matt Moore +Matt Morrison <3maven@gmail.com> Matt Richardson Matt Rickard Matt Robenolt @@ -1305,12 +1410,14 @@ Matthew Riley Matthias Klumpp Matthias Kühnle Matthias Rampke +Matthieu Fronton Matthieu Hauglustaine Mattias Jernberg Mauricio Garavaglia mauriyouth Max Harmathy Max Shytikov +Max Timchenko Maxim Fedchyshyn Maxim Ivanov Maxim Kulkin @@ -1324,14 +1431,16 @@ Megan Kostick Mehul Kar Mei ChunTao Mengdi Gao +Menghui Chen Mert Yazıcıoğlu mgniu Micah Zoltu Michael A. 
Smith +Michael Beskin Michael Bridgen Michael Brown Michael Chiang -Michael Crosby +Michael Crosby Michael Currie Michael Friis Michael Gorsuch @@ -1340,6 +1449,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kuehn Michael Käufl Michael Neale Michael Nussbaum @@ -1349,23 +1459,29 @@ Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies +Michael Weidmann Michael West Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala +Michal Kostrzewa Michal Minář +Michal Rostecki Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz Michał Gryko +Michał Kosek Michiel de Jong Mickaël Fortunato Mickaël Remars Miguel Angel Fernández Miguel Morales +Miguel Perez Mihai Borobocea Mihuleacc Sergiu +Mikael Davranche Mike Brown Mike Bush Mike Casas @@ -1384,6 +1500,7 @@ Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1392,7 +1509,7 @@ Misty Stanley-Jones Mitch Capper Mizuki Urushida mlarcher -Mohammad Banikazemi +Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni @@ -1406,6 +1523,7 @@ Moysés Borges mrfly Mrunal Patel Muayyad Alsadi +Muhammad Zohaib Aslam Mustafa Akın Muthukumar R Máximo Cuadros @@ -1422,6 +1540,8 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Carlson +Nathan Herald Nathan Hsieh Nathan Kleyn Nathan LeClaire @@ -1445,6 +1565,7 @@ Nick Payne Nick Russo Nick Stenning Nick Stinemates +Nick Wood NickrenREN Nicola Kabar Nicolas Borboën @@ -1455,6 +1576,7 @@ Nicolas Kaiser Nicolas Sterchele Nicolas V Castet Nicolás Hock Isaza +Niel Drummond Nigel Poulton Nik Nyby Nikhil Chawla @@ -1472,6 +1594,7 @@ noducks Nolan Darilek Noriki Nakamura nponeccop +Nurahmadie Nuutti Kotivuori nzwsch O.S. Tezer @@ -1489,7 +1612,9 @@ Olle Jonsson Olli Janatuinen Olly Pomeroy Omri Shiv +Onur Filiz Oriol Francès +Oscar Bonilla <6f6231@gmail.com> Oskar Niburski Otto Kekäläinen Ouyang Liduo @@ -1502,10 +1627,12 @@ Pascal Borreli Pascal Hartig Patrick Böänziger Patrick Devine +Patrick Haas Patrick Hemmer Patrick Stapleton Patrik Cyvoct pattichen +Paul "TBBle" Hampson Paul paul Paul Annesley @@ -1520,6 +1647,7 @@ Paul Liljenberg Paul Morie Paul Nasrat Paul Weaver +Paulo Gomes Paulo Ribeiro Pavel Lobashov Pavel Matěja @@ -1530,6 +1658,7 @@ Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik Pawel Konczalski +Paweł Gronowski Peeyush Gupta Peggy Li Pei Su @@ -1537,6 +1666,7 @@ Peng Tao Penghan Wang Per Weijnitz perhapszzy@sina.com +Pete Woods Peter Bourgon Peter Braden Peter Bücker @@ -1552,8 +1682,10 @@ Peter Salvatore Peter Volpe Peter Waller Petr Švihlík +Petros Angelatos Phil -Phil Estes +Phil Estes +Phil Sphicas Phil Spitler Philip Alexander Etling Philip Monroe @@ -1570,21 +1702,25 @@ Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan -pixelistik +Piotr Karbowski Porjo Poul Kjeldager Sørensen Pradeep Chhetri Pradip Dhara +Pradipta Kr. 
Banerjee Prasanna Gautam Pratik Karki Prayag Verma Priya Wadhwa Projjol Banerji Przemek Hejman +Puneet Pruthi Pure White pysqz Qiang Huang +Qin TianHuan Qinglan Peng +Quan Tian qudongfang Quentin Brossard Quentin Perez @@ -1607,6 +1743,7 @@ Ramon van Alteren RaviTeja Pothana Ray Tsang ReadmeCritic +realityone Recursive Madman Reficul Regan McCooey @@ -1617,9 +1754,9 @@ Renaud Gaubert Rhys Hiltner Ri Xu Ricardo N Feliciano +Rich Horwood Rich Moyse Rich Seymour -Richard Richard Burnison Richard Harvey Richard Mathie @@ -1634,12 +1771,14 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich Rob Vesse Robert Bachmann Robert Bittle Robert Obryk Robert Schneider +Robert Shade Robert Stern Robert Terhaar Robert Wallis @@ -1652,6 +1791,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1666,11 +1806,14 @@ Roma Sokolov Roman Dudin Roman Mazur Roman Strashkin +Roman Volosatovs +Roman Zabaluev Ron Smits Ron Williams Rong Gao Rong Zhang Rongxiang Song +Rony Weng root root root @@ -1690,13 +1833,16 @@ Russ Magee Ryan Abrams Ryan Anderson Ryan Aslett +Ryan Barry Ryan Belgrave +Ryan Campbell Ryan Detzel Ryan Fowler Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto +Ryan Shea Ryan Simmen Ryan Stelly Ryan Thomas @@ -1706,9 +1852,9 @@ Ryan Zhang ryancooper7 RyanDeng Ryo Nakao +Ryoga Saito Rémy Greinhofer s. rannou -s00318865 Sabin Basyal Sachin Joshi Sagar Hani @@ -1728,8 +1874,9 @@ Sambuddha Basu Sami Wagiaalla Samuel Andaya Samuel Dion-Girardeau -Samuel Karp +Samuel Karp Samuel PHAN +sanchayanghosh Sandeep Bansal Sankar சங்கர் Sanket Saurav @@ -1745,6 +1892,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Percival Scott Stamp Scott Walls sdreyesg @@ -1757,6 +1905,9 @@ Sean P. Kane Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran SeongJae Park @@ -1776,12 +1927,15 @@ shaunol Shawn Landden Shawn Siefkas shawnhe +Shayan Pooya Shayne Wang Shekhar Gulati Sheng Yang Shengbo Song +Shengjing Zhu Shev Yan Shih-Yuan Lee +Shihao Xia Shijiang Wei Shijun Qin Shishir Mahajan @@ -1790,14 +1944,13 @@ Shourya Sarcar Shu-Wai Chow shuai-z Shukui Yang -Shuwei Hao Sian Lerk Lau +Siarhei Rasiukevich Sidhartha Mani sidharthamani Silas Sewell Silvan Jegen Simão Reis -Simei He Simon Barendse Simon Eskildsen Simon Ferquel @@ -1808,13 +1961,16 @@ Simon Vikstrom Sindhu S Sjoerd Langkemper skanehira +Smark Meng Solganik Alexander Solomon Hykes Song Gao Soshi Katsuta +Sotiris Salloumis Soulou Spencer Brown Spencer Smith +Spike Curtis Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu @@ -1830,6 +1986,7 @@ Stefan S. Stefan Scherer Stefan Staudenmeyer Stefan Weil +Steffen Butzer Stephan Spindler Stephen Benjamin Stephen Crosby @@ -1848,7 +2005,9 @@ Steven Iveson Steven Merrill Steven Richards Steven Taylor +Stéphane Este-Gracias Stig Larsson +Su Wang Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> @@ -1858,15 +2017,16 @@ Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade -Sylvain Baubeau +Sylvain Baubeau Sylvain Bellemare Sébastien Sébastien HOUZÉ Sébastien Luttringer Sébastien Stormacq +Sören Tempel Tabakhase Tadej Janež -TAGOMORI Satoshi +Takuto Sato tang0th Tangi Colin Tatsuki Sugiura @@ -1877,18 +2037,21 @@ Ted M. 
Young Tehmasp Chaudhri Tejaswini Duggaraju Tejesh Mehta +Terry Chu terryding77 <550147740@qq.com> -tgic Thatcher Peskens theadactyl Thell 'Bo' Fowler Thermionix +Thiago Alves Silva Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Gazagnaire +Thomas Graf Thomas Grainger Thomas Hansen +Thomas Ledos Thomas Leonard Thomas Léveil Thomas Orozco @@ -1899,11 +2062,13 @@ Thomas Swift Thomas Tanaka Thomas Texier Ti Zhou +Tiago Seabra Tianon Gravi Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low +Till Claassen Till Wegmüller Tim Tim Bart @@ -1915,11 +2080,14 @@ Tim Potter Tim Ruffles Tim Smith Tim Terhorst +Tim Wagner Tim Wang Tim Waugh Tim Wraight Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> timfeirg +Timo Rothenpieler Timothy Hobbs tjwebb123 tobe @@ -1928,6 +2096,7 @@ Tobias Bradtke Tobias Gesellchen Tobias Klauser Tobias Munk +Tobias Pfandzelter Tobias Schmidt Tobias Schwab Todd Crane @@ -1941,25 +2110,33 @@ Tom Fotherby Tom Howe Tom Hulihan Tom Maaswinkel +Tom Parker Tom Sweeney Tom Wilkie Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral Tomas Tomecek Tomasz Kopczynski Tomasz Lipinski Tomasz Nurkiewicz +Tomek Mańko Tommaso Visconti +Tomoya Tabuchi Tomáš Hrčka +tonic Tonny Xu Tony Abboud Tony Daws Tony Miller toogley Torstein Husebø +Toshiaki Makita Tõnis Tiigi Trace Andreason tracylihui <793912329@qq.com> -Trapier Marshall +Trapier Marshall Travis Cline Travis Thieman Trent Ogren @@ -1969,6 +2146,8 @@ Trevor Sullivan Trishna Guha Tristan Carel Troy Denton +Tudor Brindus +Ty Alexander Tycho Andersen Tyler Brock Tyler Brown @@ -1979,6 +2158,7 @@ Umesh Yadav Utz Bacher vagrant Vaidas Jablonskis +Valentin Kulesh vanderliang Velko Ivanov Veres Lajos @@ -1992,12 +2172,13 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K +Vikas Choudhary Vikram bir Singh Viktor Stanchev Viktor Vojnovski VinayRaghavanKS Vincent Batts -Vincent Bernat +Vincent Bernat Vincent Boulineau Vincent Demeester Vincent Giersch @@ -2017,9 +2198,9 @@ Vladimir Pouzanov Vladimir Rutsky Vladimir Varankin VladimirAus +Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) -waitingkuo Walter Leibbrandt Walter Stanish Wang Chao @@ -2034,6 +2215,7 @@ wanghuaiqing Ward Vandewege WarheadsSE Wassim Dhif +Wataru Ishida Wayne Chang Wayne Song Weerasak Chongnguluam @@ -2048,7 +2230,6 @@ Wendel Fleming Wenjun Tang Wenkai Yin wenlxie -Wentao Zhang Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang @@ -2068,16 +2249,22 @@ William Thurston Wilson Júnior Wing-Kam Wong WiseTrem +Wolfgang Nagele Wolfgang Powisch Wonjun Kim +WuLonghui xamyzhao +Xia Wu Xian Chaobo Xianglin Gao +Xianjie Xianlu Bird Xiao YongBiao +Xiao Zhang XiaoBing Jiang Xiaodong Liu Xiaodong Zhang +Xiaohua Ding Xiaoxi He Xiaoxu Chen Xiaoyu Zhang @@ -2092,12 +2279,16 @@ Xuecong Liao xuzhaokui Yadnyawalkya Tale Yahya +yalpul YAMADA Tsuyoshi Yamasaki Masahide Yan Feng +Yan Zhu Yang Bai +Yang Li Yang Pengfei yangchenliang +Yann Autissier Yanqiang Miao Yao Zaiyong Yash Murty @@ -2117,6 +2308,7 @@ Yosef Fertel You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF +Youfu Zhang Yu Changchun Yu Chengxia Yu Peng @@ -2124,14 +2316,18 @@ Yu-Ju Hong Yuan Sun Yuanhong Peng Yue Zhang +Yufei Xiong Yuhao Fang Yuichiro Kaneko +YujiOshima Yunxiang Huang Yurii Rashkovskii Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> Yves Junqueira Zac Dover Zach Borboa +Zach Gershman Zachary Jaffee Zain Memon Zaiste! 
@@ -2147,6 +2343,7 @@ Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo Zhenhai Gao Zhenkun Bi +ZhiPeng Lu zhipengzuo Zhou Hao Zhoulin Xie @@ -2164,7 +2361,6 @@ Zou Yu zqh Zuhayr Elahi Zunayed Ali -Álex González Álvaro Lázaro Átila Camurça Alves 尹吉峰 @@ -2173,3 +2369,4 @@ Zunayed Ali 慕陶 搏通 黄艳红00139573 +정재영 diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index f767195b..077583e6 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,6 +1,7 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "io" "time" "github.com/docker/docker/api/types/strslice" @@ -13,6 +14,24 @@ import ( // Docker interprets it as 3 nanoseconds. const MinimumDuration = 1 * time.Millisecond +// StopOptions holds the options to stop or restart a container. +type StopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If not value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. @@ -34,6 +53,14 @@ type HealthConfig struct { Retries int `json:",omitempty"` } +// ExecStartOptions holds the options to start container's exec. +type ExecStartOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + ConsoleSize *[2]uint `json:",omitempty"` +} + // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index d0c852f8..00000000 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
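A minimal sketch of how a caller might populate the new container.StopOptions type added in config.go above. It uses only names that appear in this hunk; the client call that eventually consumes the options is omitted, since that depends on the Docker client version in use and is an assumption, not something this diff shows.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Wait up to 30 seconds for a graceful stop before SIGKILL.
	timeout := 30
	opts := container.StopOptions{
		Signal:  "SIGTERM", // optional; an empty value falls back to the default signal
		Timeout: &timeout,  // nil would mean the default 10-second timeout
	}
	fmt.Printf("stop options: %+v\n", opts)
}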
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 49e05ae6..00000000 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go new file mode 100644 index 00000000..aa0e7f7d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_response.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/deprecated.go b/vendor/github.com/docker/docker/api/types/container/deprecated.go new file mode 100644 index 00000000..0cb70e36 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/deprecated.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// +// Deprecated: use CreateResponse +type ContainerCreateCreatedBody = CreateResponse + +// ContainerWaitOKBody OK response to ContainerWait operation +// +// Deprecated: use WaitResponse +type ContainerWaitOKBody = WaitResponse + +// ContainerWaitOKBodyError container waiting error, if any +// +// Deprecated: use WaitExitError +type ContainerWaitOKBodyError = WaitExitError diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 2d1cbaa9..100f434c 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -13,19 +13,26 @@ import ( // CgroupnsMode represents the cgroup namespace mode of the container type CgroupnsMode string +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + // IsPrivate indicates whether the container uses its own private cgroup namespace func (c CgroupnsMode) IsPrivate() bool { - return c == "private" + return c == CgroupnsModePrivate } // IsHost indicates whether the container shares the host's cgroup namespace func (c CgroupnsMode) IsHost() bool { - return c == "host" + return c == CgroupnsModeHost } // IsEmpty indicates whether the container cgroup namespace mode is unset func (c CgroupnsMode) IsEmpty() bool { - return c == "" + return c == CgroupnsModeEmpty } // Valid indicates whether the cgroup namespace mode is valid @@ -37,60 +44,69 @@ func (c CgroupnsMode) Valid() bool { // values are platform specific type Isolation string +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + // IsDefault indicates the default isolation technology of a container. On Linux this // is the native driver. On Windows, this is a Windows Server Container. 
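A minimal sketch showing that the deprecated.go aliases added above keep the old swagger-generated names compiling: ContainerCreateCreatedBody is now a type alias for CreateResponse, so the two are interchangeable. Only types from this diff are used; the field values are illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Old name, kept as an alias of container.CreateResponse.
	var created container.ContainerCreateCreatedBody
	created.ID = "hypothetical-container-id" // illustrative value only
	created.Warnings = []string{}

	// Because it is an alias, assignment to the new type needs no conversion.
	var resp container.CreateResponse = created
	fmt.Println(resp.ID, len(resp.Warnings))
}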
func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty } // IsHyperV indicates the use of a Hyper-V partition for isolation func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV } // IsProcess indicates the use of process isolation func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess } -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - // IpcMode represents the container ipc stack. type IpcMode string +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + // IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { - return n == "private" + return n == IPCModePrivate } // IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { - return n == "host" + return n == IPCModeHost } // IsShareable indicates whether the container's ipc namespace can be shared with another container. func (n IpcMode) IsShareable() bool { - return n == "shareable" + return n == IPCModeShareable } // IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" + return strings.HasPrefix(string(n), string(IPCModeContainer)+":") } // IsNone indicates whether container IpcMode is set to "none". func (n IpcMode) IsNone() bool { - return n == "none" + return n == IPCModeNone } // IsEmpty indicates whether container IpcMode is empty @@ -105,9 +121,8 @@ func (n IpcMode) Valid() bool { // Container returns the name of the container ipc stack is going to be used. 
func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] + if n.IsContainer() { + return strings.TrimPrefix(string(n), string(IPCModeContainer)+":") } return "" } @@ -326,7 +341,7 @@ type LogMode string // Available logging modes const ( - LogModeUnset = "" + LogModeUnset LogMode = "" LogModeBlocking LogMode = "blocking" LogModeNonBlock LogMode = "non-blocking" ) @@ -361,14 +376,17 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // KernelMemory specifies the kernel memory limit (in bytes) for the container. + // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. + KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count @@ -399,6 +417,7 @@ type HostConfig struct { AutoRemove bool // Automatically remove container when it exits VolumeDriver string // Name of the volume driver used to mount volumes VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container @@ -427,8 +446,7 @@ type HostConfig struct { Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) // Contains container's resources (cgroups, ulimits) Resources diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go new file mode 100644 index 00000000..ab56d4ee --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go @@ -0,0 +1,12 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go new file mode 100644 index 00000000..84fc6afd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index b4976a34..f8fe7940 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" ) // Args stores a mapping of keys to a set of multiple values. @@ -98,7 +99,7 @@ func FromJSON(p string) (Args, error) { // Fallback to parsing arguments in the legacy slice format deprecated := map[string][]string{} if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err + return args, invalidFilter{errors.Wrap(err, "invalid filter")} } args.fields = deprecatedArgs(deprecated) @@ -245,10 +246,10 @@ func (args Args) Contains(field string) bool { return ok } -type invalidFilter string +type invalidFilter struct{ error } func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" + return e.error.Error() } func (invalidFilter) InvalidParameter() {} @@ -258,7 +259,7 @@ func (invalidFilter) InvalidParameter() {} func (args Args) Validate(accepted map[string]bool) error { for name := range args.fields { if !accepted[name] { - return invalidFilter(name) + return invalidFilter{errors.New("invalid filter '" + name + "'")} } } return nil diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 443b8d07..ac4ce622 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -17,6 +17,8 @@ const ( TypeTmpfs Type = "tmpfs" // TypeNamedPipe is the type for mounting Windows named pipes TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" ) // Mount represents a mount (volume). @@ -30,9 +32,10 @@ type Mount struct { ReadOnly bool `json:",omitempty"` Consistency Consistency `json:",omitempty"` - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` } // Propagation represents the propagation of a mount. 
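The mount.go hunk above adds a TypeCluster mount type and a ClusterOptions field on Mount for Swarm Cluster Volumes. A minimal sketch of how a caller might request such a mount against this vendored API, assuming a hypothetical CSI-backed volume named "my-csi-volume" mounted at "/data" (ClusterOptions currently carries no fields):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// Request a Swarm Cluster Volume; the volume name and target path are
	// illustrative assumptions, and ClusterOptions is intentionally empty.
	m := mount.Mount{
		Type:           mount.TypeCluster,
		Source:         "my-csi-volume",
		Target:         "/data",
		ClusterOptions: &mount.ClusterOptions{},
	}
	fmt.Printf("%+v\n", m)
}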
@@ -79,8 +82,9 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. @@ -129,3 +133,8 @@ type TmpfsOptions struct { // Some of these may be straightforward to add, but others, such as // uid/gid have implications in a clustered system. } + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index ef020f45..5ded7dba 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,12 +1,20 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "strconv" + "time" +) // Version represents the internal object version. type Version struct { Index uint64 `json:",omitempty"` } +// String implements fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + // Meta is a base object inherited by most of the other once. type Meta struct { Version Version `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 1e30f5fa..bb98d5ee 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -53,6 +53,7 @@ type NodeDescription struct { Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). @@ -68,6 +69,21 @@ type EngineDescription struct { Plugins []PluginDescription `json:",omitempty"` } +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + // PluginDescription represents the description of an engine plugin. type PluginDescription struct { Type string `json:",omitempty"` @@ -113,3 +129,11 @@ const ( // NodeStateDisconnected DISCONNECTED NodeStateDisconnected NodeState = "disconnected" ) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. 
See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index b25f9996..3eae4b9b 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -213,6 +213,16 @@ type Info struct { Warnings []string `json:",omitempty"` } +// Status provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type Status struct { + // NodeState represents the state of the node. + NodeState LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + // Peer represents a peer. type Peer struct { NodeID string diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index a6f7ab7b..ad3eeca0 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -62,6 +62,11 @@ type Task struct { // used to determine which Tasks belong to which run of the job. This field // is absent if the Service mode is Replicated or Global. JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. It specifies + // which particular volumes are to be used by this particular task, and + // which mounts in the spec they fulfill. + Volumes []VolumeAttachment } // TaskSpec represents the spec of a task. @@ -204,3 +209,17 @@ type ContainerStatus struct { type PortStatus struct { Ports []PortConfig `json:",omitempty"` } + +// VolumeAttachment describes the association of a Volume with a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills. + Target string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 8ccb0aa9..489e917e 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -8,6 +8,9 @@ import ( // compare compares two version strings // returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int { + if v1 == v2 { + return 0 + } var ( currTab = strings.Split(v1, ".") otherTab = strings.Split(v2, ".") diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go similarity index 76% rename from vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go rename to vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go index 946bb871..09c2dacb 100644 --- a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go +++ b/vendor/github.com/docker/docker/libnetwork/resolvconf/resolvconf.go @@ -3,14 +3,11 @@ package resolvconf import ( "bytes" - "io/ioutil" + "os" "regexp" "strings" "sync" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/libnetwork/resolvconf/dns" - "github.com/docker/libnetwork/types" "github.com/sirupsen/logrus" ) @@ -21,6 +18,13 @@ const ( alternatePath = "/run/systemd/resolve/resolv.conf" ) +// constants for the IP address type +const ( + IP = iota // IPv4 and IPv6 + IPv4 + IPv6 +) + var ( detectSystemdResolvConfOnce sync.Once pathAfterSystemdDetection = defaultPath @@ -39,12 +43,12 @@ var ( // More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf func Path() string { detectSystemdResolvConfOnce.Do(func() { - candidateResolvConf, err := ioutil.ReadFile(defaultPath) + candidateResolvConf, err := os.ReadFile(defaultPath) if err != nil { // silencing error as it will resurface at next calls trying to read defaultPath return } - ns := GetNameservers(candidateResolvConf, types.IP) + ns := GetNameservers(candidateResolvConf, IP) if len(ns) == 1 && ns[0] == "127.0.0.53" { pathAfterSystemdDetection = alternatePath logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) @@ -53,20 +57,26 @@ func Path() string { return pathAfterSystemdDetection } -var ( - // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS - defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} - defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} - ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` - ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock +const ( + // ipLocalhost is a regex pattern for IPv4 or IPv6 loopback range. + ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` + ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock + // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants // -- e.g. other link-local types -- either won't work in containers or are unnecessary. 
// For readability and sufficiency for Docker purposes this seemed more reasonable than a // 1000+ character regexp with exact and complete IPv6 validation ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` +) + +var ( + // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS + defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} + defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} - localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) + localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`) nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) @@ -75,12 +85,6 @@ var ( optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) ) -var lastModified struct { - sync.Mutex - sha256 string - contents []byte -} - // File contains the resolv.conf content and its hash type File struct { Content []byte @@ -94,57 +98,23 @@ func Get() (*File, error) { // GetSpecific returns the contents of the user specified resolv.conf file and its hash func GetSpecific(path string) (*File, error) { - resolv, err := ioutil.ReadFile(path) + resolv, err := os.ReadFile(path) if err != nil { return nil, err } - hash, err := ioutils.HashData(bytes.NewReader(resolv)) + hash, err := hashData(bytes.NewReader(resolv)) if err != nil { return nil, err } return &File{Content: resolv, Hash: hash}, nil } -// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash -// and, if modified since last check, returns the bytes and new hash. -// This feature is used by the resolv.conf updater for containers -func GetIfChanged() (*File, error) { - lastModified.Lock() - defer lastModified.Unlock() - - resolv, err := ioutil.ReadFile(Path()) - if err != nil { - return nil, err - } - newHash, err := ioutils.HashData(bytes.NewReader(resolv)) - if err != nil { - return nil, err - } - if lastModified.sha256 != newHash { - lastModified.sha256 = newHash - lastModified.contents = resolv - return &File{Content: resolv, Hash: newHash}, nil - } - // nothing changed, so return no data - return nil, nil -} - -// GetLastModified retrieves the last used contents and hash of the host resolv.conf. -// Used by containers updating on restart -func GetLastModified() *File { - lastModified.Lock() - defer lastModified.Unlock() - - return &File{Content: lastModified.contents, Hash: lastModified.sha256} -} - // FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: -// 1. It looks for localhost (127.*|::1) entries in the provided -// resolv.conf, removing local nameserver entries, and, if the resulting -// cleaned config has no defined nameservers left, adds default DNS entries -// 2. Given the caller provides the enable/disable state of IPv6, the filter -// code will remove all IPv6 nameservers if it is not enabled for containers -// +// 1. It looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2. 
Given the caller provides the enable/disable state of IPv6, the filter +// code will remove all IPv6 nameservers if it is not enabled for containers func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) // if IPv6 is not enabled, also clean out any IPv6 address nameserver @@ -153,7 +123,7 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { } // if the resulting resolvConf has no more nameservers defined, add appropriate // default DNS servers for IPv4 and (optionally) IPv6 - if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 { + if len(GetNameservers(cleanedResolvConf, IP)) == 0 { logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) dns := defaultIPv4Dns if ipv6Enabled { @@ -162,7 +132,7 @@ func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { } cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) } - hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf)) + hash, err := hashData(bytes.NewReader(cleanedResolvConf)) if err != nil { return nil, err } @@ -189,11 +159,11 @@ func GetNameservers(resolvConf []byte, kind int) []string { nameservers := []string{} for _, line := range getLines(resolvConf, []byte("#")) { var ns [][]byte - if kind == types.IP { + if kind == IP { ns = nsRegexp.FindSubmatch(line) - } else if kind == types.IPv4 { + } else if kind == IPv4 { ns = nsIPv4Regexpmatch.FindSubmatch(line) - } else if kind == types.IPv6 { + } else if kind == IPv6 { ns = nsIPv6Regexpmatch.FindSubmatch(line) } if len(ns) > 0 { @@ -208,7 +178,7 @@ func GetNameservers(resolvConf []byte, kind int) []string { // This function's output is intended for net.ParseCIDR func GetNameserversAsCIDR(resolvConf []byte) []string { nameservers := []string{} - for _, nameserver := range GetNameservers(resolvConf, types.IP) { + for _, nameserver := range GetNameservers(resolvConf, IP) { var address string // If IPv6, strip zone if present if strings.Contains(nameserver, ":") { @@ -276,10 +246,10 @@ func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) { } } - hash, err := ioutils.HashData(bytes.NewReader(content.Bytes())) + hash, err := hashData(bytes.NewReader(content.Bytes())) if err != nil { return nil, err } - return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644) + return &File{Content: content.Bytes(), Hash: hash}, os.WriteFile(path, content.Bytes(), 0644) } diff --git a/vendor/github.com/docker/docker/libnetwork/resolvconf/utils.go b/vendor/github.com/docker/docker/libnetwork/resolvconf/utils.go new file mode 100644 index 00000000..7d4d0957 --- /dev/null +++ b/vendor/github.com/docker/docker/libnetwork/resolvconf/utils.go @@ -0,0 +1,16 @@ +package resolvconf + +import ( + "crypto/sha256" + "encoding/hex" + "io" +) + +// hashData returns the sha256 sum of src. 
+func hashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index ade5f539..3af7c3a6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -7,6 +7,7 @@ import ( "compress/bzip2" "compress/gzip" "context" + "encoding/binary" "fmt" "io" "os" @@ -17,15 +18,31 @@ import ( "syscall" "time" - "github.com/docker/docker/pkg/fileutils" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" + "github.com/klauspost/compress/zstd" + "github.com/moby/patternmatcher" + "github.com/moby/sys/sequential" + "github.com/pkg/errors" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) +// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a +// tar, but that do not have their own header entry. +// +// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not +// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and +// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. +// +// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is +// subject to change in Moby at any time -- image authors who require consistent or known directory permissions +// should explicitly control them by ensuring that header entries exist for any applicable path. +const ImpliedDirectoryMode = 0755 + type ( // Compression is the state represents if compressed or not. Compression int @@ -38,8 +55,7 @@ type ( ExcludePatterns []string Compression Compression NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + IDMap idtools.IdentityMapping ChownOpts *idtools.Identity IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. @@ -61,12 +77,12 @@ type ( // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { Untar func(io.Reader, string, *TarOptions) error - IDMapping *idtools.IdentityMapping + IDMapping idtools.IdentityMapping } // NewDefaultArchiver returns a new Archiver without any IdentityMapping func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}} + return &Archiver{Untar: Untar} } // breakoutError is used to differentiate errors related to breaking out @@ -83,6 +99,8 @@ const ( Gzip // Xz is xz compression algorithm. Xz + // Zstd is zstd compression algorithm. 
+ Zstd ) const ( @@ -121,18 +139,59 @@ func IsArchivePath(path string) bool { return err == nil } +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + bzip2Magic = []byte{0x42, 0x5A, 0x68} + gzipMagic = []byte{0x1F, 0x8B, 0x08} + xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// Zstandard compressed data is made of one or more frames. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. +func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. + if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { + compressionMap := map[Compression]matcher{ + Bzip2: magicNumberMatcher(bzip2Magic), + Gzip: magicNumberMatcher(gzipMagic), + Xz: magicNumberMatcher(xzMagic), + Zstd: zstdMatcher(), + } + for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { + fn := compressionMap[compression] + if fn(source) { return compression } } @@ -146,20 +205,15 @@ func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) } func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") - var noPigz bool - - if noPigzEnv != "" { - var err error - noPigz, err = strconv.ParseBool(noPigzEnv) + if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { + noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } - } - - if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) - return gzip.NewReader(buf) + if noPigz { + logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + return gzip.NewReader(buf) + } } unpigzPath, err := exec.LookPath("unpigz") @@ -224,6 +278,13 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return wrapReadCloser(readBufWrapper, cancel), nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) + return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } @@ -277,7 +338,9 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi return nil } - header.Name = name + if header.Name == "" { + header.Name = name + } header.Size = int64(len(data)) if err := 
tarWriter.WriteHeader(header); err != nil { return err @@ -332,7 +395,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi } pipeWriter.Close() - }() return pipeReader } @@ -348,16 +410,70 @@ func (compression *Compression) Extension() string { return "tar.gz" case Xz: return "tar.xz" + case Zstd: + return "tar.zst" } return "" } +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStat, if non-nil, populates hdr from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, hdr *tar.Header) error + +// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. +// +// Compared to the archive/tar.FileInfoHeader function, this function is safe to +// call from a chrooted process as it does not populate fields which would +// require operating system lookups. It behaves identically to +// tar.FileInfoHeader when fi is a FileInfo value returned from +// tar.Header.FileInfo(). +// +// When fi is a FileInfo for a native file, such as returned from os.Stat() and +// os.Lstat(), the returned Header value differs from one returned from +// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not +// set as OS lookups would be required to populate them. The AccessTime and +// ChangeTime fields are not currently set (not yet implemented) although that +// is subject to change. Callers which require the AccessTime or ChangeTime +// fields to be zeroed should explicitly zero them out in the returned Header +// value to avoid any compatibility issues in the future. +func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + if sysStat != nil { + return hdr, sysStat(fi, hdr) + } + return hdr, nil +} + // FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. +// +// Compared to the archive/tar package, this function fills in less information +// but is safe to call from a chrooted process. The AccessTime and ChangeTime +// fields are not set in the returned header, ModTime is truncated to one-second +// precision, and the Uname and Gname fields are only set when fi is a FileInfo +// value returned from tar.Header.FileInfo(). Also, regardless of Go version, +// this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have +// been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := FileInfoHeaderNoLookups(fi, link) if err != nil { return nil, err } @@ -367,9 +483,6 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) hdr.Name = canonicalTarName(name, fi.IsDir()) - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } return hdr, nil } @@ -434,7 +547,7 @@ type tarAppender struct { // for hardlink mapping SeenFiles map[uint64]string - IdentityMapping *idtools.IdentityMapping + IdentityMapping idtools.IdentityMapping ChownOpts *idtools.Identity // For packing and unpacking whiteout files in the @@ -444,7 +557,7 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } -func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { +func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), @@ -559,10 +672,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(path) + // We use sequential file access to avoid depleting the standby list on + // Windows. On Linux, this equates to a regular os.Open. + file, err := sequential.Open(path) if err != nil { return err } @@ -599,11 +711,10 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } } - case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + case tar.TypeReg: + // Source is regular file. We use sequential file access to avoid depleting + // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. + file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } @@ -629,6 +740,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeLink: + // #nosec G305 -- The target path is checked for path traversal. targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { @@ -641,7 +753,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. 
// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because // that symlink would first have to be created, which would be caught earlier, at this very check: @@ -666,7 +778,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err + msg := "failed to Lchown %q for UID %d, GID %d" + if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" + } + return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) } } @@ -685,7 +801,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } return err } - } if len(errors) > 0 { @@ -735,12 +850,11 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + pm, err := patternmatcher.New(options.ExcludePatterns) if err != nil { return nil, err } @@ -759,7 +873,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) go func() { ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + options.IDMap, compressWriter, options.ChownOpts, ) @@ -814,6 +928,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) for _, include := range options.IncludeFiles { rebaseName := options.RebaseNames[include] + var ( + parentMatchInfo []patternmatcher.MatchInfo + parentDirs []string + ) + walkRoot := getWalkRoot(srcPath, include) filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { if err != nil { @@ -840,11 +959,30 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = pm.Matches(relFilePath) + for len(parentDirs) != 0 { + lastParentDir := parentDirs[len(parentDirs)-1] + if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { + break + } + parentDirs = parentDirs[:len(parentDirs)-1] + parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] + } + + var matchInfo patternmatcher.MatchInfo + if len(parentMatchInfo) != 0 { + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) + } else { + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) + } if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err } + + if f.IsDir() { + parentDirs = append(parentDirs, relFilePath) + parentMatchInfo = append(parentMatchInfo, matchInfo) + } } if skip { @@ -919,8 +1057,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := 
idMapping.RootPair() whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) if err != nil { return err @@ -955,21 +1091,13 @@ loop: } } - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs) - if err != nil { - return err - } - } + // Ensure that the parent directory exists. + err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return err } + // #nosec G305 -- The joined path is checked for path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { @@ -1008,7 +1136,7 @@ loop: } trBuf.Reset(tr) - if err := remapIDs(idMapping, hdr); err != nil { + if err := remapIDs(options.IDMap, hdr); err != nil { return err } @@ -1034,6 +1162,7 @@ loop: } for _, hdr := range dirs { + // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { @@ -1043,6 +1172,35 @@ loop: return nil } +// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do +// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is +// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus +// we must both create them and choose metadata like permissions. +// +// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS +// on which the daemon is running. This precondition is required because this function assumes an OS-specific path +// separator when checking that a path is not the root. +func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { + // Not the root directory, ensure that the parent directory exists + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some + // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche + // usage that reduces the portability of an image. + rootIDs := options.IDMap.RootPair() + + err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) + if err != nil { + return err + } + } + } + + return nil +} + // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: @@ -1089,15 +1247,13 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error. func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), + IDMap: archiver.IDMapping, } return archiver.Untar(archive, dst, options) } @@ -1110,8 +1266,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMapping.UIDs(), - GIDMaps: archiver.IDMapping.GIDs(), + IDMap: archiver.IDMapping, } return archiver.Untar(archive, dst, options) } @@ -1134,11 +1289,9 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } @@ -1146,7 +1299,6 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err @@ -1181,7 +1333,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } defer srcF.Close() - hdr, err := tar.FileInfoHeader(srcSt, "") + hdr, err := FileInfoHeaderNoLookups(srcSt, "") if err != nil { return err } @@ -1221,11 +1373,11 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } // IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { +func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { return archiver.IDMapping } -func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { +func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID return err diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 0a3cc1f9..76321a35 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -59,7 +59,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os Gname: hdr.Gname, AccessTime: hdr.AccessTime, ChangeTime: hdr.ChangeTime, - } + } //#nosec G305 -- An archive is being created, not extracted. 
} } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index fea53d3a..1a2aea2e 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -11,12 +11,16 @@ import ( "strings" "syscall" - "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "golang.org/x/sys/unix" ) +func init() { + sysStat = statUnix +} + // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { @@ -45,19 +49,24 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert - } + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert } - return + return nil } func getInodeFromStat(stat interface{}) (inode uint64, err error) { @@ -93,7 +102,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { } err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) - if errors.Is(err, syscall.EPERM) && sys.RunningInUserNS() { + if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() { // In most cases, cannot create a device if running in user namespace err = nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index a0f25942..7f7242be 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -246,7 +246,6 @@ func (info *FileInfo) path() string { } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) if oldInfo == nil { @@ -319,7 +318,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } - } // Changes add changes to file information. @@ -394,10 +392,10 @@ func ChangesSize(newDir string, changes []Change) int64 { } // ExportChanges produces an Archive from the provided changes, relative to dir. 
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { +func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + ta := newTarAppender(idMap, writer, nil) // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index b9be0d5e..f3111b79 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -29,8 +29,8 @@ var ( // clean path already ends in the separator, then another is not added. func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { // Ensure paths are in platform semantics - cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) - originalPath = strings.Replace(originalPath, "/", string(sep), -1) + cleanedPath = strings.ReplaceAll(cleanedPath, "/", string(sep)) + originalPath = strings.ReplaceAll(originalPath, "/", string(sep)) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { if !hasTrailingPathSeparator(cleanedPath, sep) { @@ -303,7 +303,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } - } // RebaseArchiveEntries rewrites the given srcContent archive replacing @@ -373,9 +372,6 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read return rebased } -// TODO @gupta-ak. These might have to be changed in the future to be -// continuity driver aware as well to support LCOW. - // CopyResource performs an archive copy from the given source path to the // given destination path. The source path MUST exist and the destination // path's parent directory must exist. diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index e095104b..8eeccb60 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -9,7 +9,6 @@ import ( "runtime" "strings" - "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" "github.com/sirupsen/logrus" @@ -32,7 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) @@ -74,20 +72,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } } - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } + // Ensure that the parent directory exists. 
+ err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return 0, err } // Skip AUFS metadata dirs @@ -113,6 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, continue } } + //#nosec G305 -- The joined path is guarded against path traversal. path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { @@ -191,7 +180,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - if err := remapIDs(idMapping, srcHdr); err != nil { + if err := remapIDs(options.IDMap, srcHdr); err != nil { return 0, err } @@ -209,6 +198,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } for _, hdr := range dirs { + //#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. path := filepath.Join(dest, hdr.Name) if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { return 0, err @@ -239,13 +229,8 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp dest = filepath.Clean(dest) // We need to be able to set any perms - if runtime.GOOS != "windows" { - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) - } + restore := overrideUmask(0) + defer restore() if decompress { decompLayer, err := DecompressStream(layer) diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go new file mode 100644 index 00000000..d7f80644 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -0,0 +1,22 @@ +//go:build !windows +// +build !windows + +package archive + +import "golang.org/x/sys/unix" + +// overrideUmask sets current process's file mode creation mask to newmask +// and returns a function to restore it. +// +// WARNING for readers stumbling upon this code. Changing umask in a multi- +// threaded environment isn't safe. Don't use this without understanding the +// risks, and don't export this function for others to use (we shouldn't even +// be using this ourself). +// +// FIXME(thaJeztah): we should get rid of these hacks if possible. +func overrideUmask(newMask int) func() { + oldMask := unix.Umask(newMask) + return func() { + unix.Umask(oldMask) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go new file mode 100644 index 00000000..d28f5b2d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go @@ -0,0 +1,6 @@ +package archive + +// overrideUmask is a no-op on windows. +func overrideUmask(newmask int) func() { + return func() {} +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/deprecated.go b/vendor/github.com/docker/docker/pkg/fileutils/deprecated.go new file mode 100644 index 00000000..a4fc2d05 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/deprecated.go @@ -0,0 +1,44 @@ +package fileutils + +import "github.com/moby/patternmatcher" + +type ( + // PatternMatcher allows checking paths against a list of patterns. + // + // Deprecated: use github.com/moby/patternmatcher.PatternMatcher + PatternMatcher = patternmatcher.PatternMatcher + + // MatchInfo tracks information about parent dir matches while traversing a + // filesystem. 
+ // + // Deprecated: use github.com/moby/patternmatcher.MatchInfo + MatchInfo = patternmatcher.MatchInfo + + // Pattern defines a single regexp used to filter file paths. + // + // Deprecated: use github.com/moby/patternmatcher.Pattern + Pattern = patternmatcher.Pattern +) + +var ( + // NewPatternMatcher creates a new matcher object for specific patterns that can + // be used later to match against patterns against paths + // + // Deprecated: use github.com/moby/patternmatcher.New + NewPatternMatcher = patternmatcher.New + + // Matches returns true if file matches any of the patterns + // and isn't excluded by any of the subsequent patterns. + // + // This implementation is buggy (it only checks a single parent dir against the + // pattern) and will be removed soon. Use MatchesOrParentMatches instead. + // + // Deprecated: use github.com/moby/patternmatcher.Matches + Matches = patternmatcher.Matches + + // MatchesOrParentMatches returns true if file matches any of the patterns + // and isn't excluded by any of the subsequent patterns. + // + // Deprecated: use github.com/moby/patternmatcher.MatchesOrParentMatches + MatchesOrParentMatches = patternmatcher.MatchesOrParentMatches +) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 34f1c726..7c607efc 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -1,236 +1,12 @@ package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( - "errors" "fmt" "io" "os" "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Matches matches path against all the patterns. 
Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. -type Pattern struct { - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } - } - - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. 
Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - // CopyFile copies from src to dst until either EOF is reached // on src or an error occurs. It verifies src exists and removes // the dst if it exists. diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 5e6310fd..7df039b4 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -91,3 +91,12 @@ func GetConfigHome() (string, error) { } return filepath.Join(home, ".config"), nil } + +// GetLibHome returns $HOME/.local/lib +func GetLibHome() (string, error) { + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get HOME") + } + return filepath.Join(home, ".local/lib"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index fc48e674..11f1bec9 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -26,3 +26,8 @@ func GetDataHome() (string, error) { func GetConfigHome() (string, error) { return "", errors.New("homedir.GetConfigHome() is not supported on this system") } + +// GetLibHome is unsupported on non-linux system. +func GetLibHome() (string, error) { + return "", errors.New("homedir.GetLibHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 25a57b23..1e0a8900 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -108,70 +108,72 @@ type Identity struct { SID string } -// IdentityMapping contains a mappings of UIDs and GIDs -type IdentityMapping struct { - uids []IDMap - gids []IDMap +// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. +func (id Identity) Chown(name string) error { + return os.Chown(name, id.UID, id.GID) } -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { - return &IdentityMapping{uids: uids, gids: gids} +// IdentityMapping contains a mappings of UIDs and GIDs. +// The zero value represents an empty mapping. 
+type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. -func (i *IdentityMapping) RootPair() Identity { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) +func (i IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) return Identity{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid. // Remapping is only performed if the ids aren't already the remapped root ids -func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { +func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { var err error target := i.RootPair() if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) + target.UID, err = toHost(pair.UID, i.UIDMaps) if err != nil { return target, err } } if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) + target.GID, err = toHost(pair.GID, i.GIDMaps) } return target, err } // ToContainer returns the container UID and GID for the host uid and gid -func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) +func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.UIDMaps) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := toContainer(pair.GID, i.GIDMaps) return uid, gid, err } // Empty returns true if there are no id mappings -func (i *IdentityMapping) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 } -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) UIDs() []IDMap { - return i.uids +// UIDs returns the mapping for UID. +// +// Deprecated: reference the UIDMaps field directly. +func (i IdentityMapping) UIDs() []IDMap { + return i.UIDMaps } -// GIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IdentityMapping) GIDs() []IDMap { - return i.gids +// GIDs returns the mapping for GID. +// +// Deprecated: reference the GIDMaps field directly. 
+func (i IdentityMapping) GIDs() []IDMap { + return i.GIDMaps } func createIDMap(subidRanges ranges) []IDMap { diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index ceec0339..03846b03 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "os" + "os/exec" "path/filepath" "strconv" "sync" @@ -30,6 +31,10 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting // chown the full directory path if it exists var paths []string + path, err := filepath.Abs(path) + if err != nil { + return err + } stat, err := system.Stat(path) if err == nil { @@ -195,7 +200,7 @@ func callGetent(database, key string) (io.Reader, error) { } out, err := execCmd(getentCmd, database, key) if err != nil { - exitCode, errC := system.GetExitCode(err) + exitCode, errC := getExitCode(err) if errC != nil { return nil, err } @@ -209,11 +214,22 @@ func callGetent(database, key string) (io.Reader, error) { default: return nil, err } - } return bytes.NewReader(out), nil } +// getExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + // setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. @@ -240,24 +256,37 @@ func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair +// +// Deprecated: Use LoadIdentityMapping. 
func NewIdentityMapping(name string) (*IdentityMapping, error) { + m, err := LoadIdentityMapping(name) + if err != nil { + return nil, err + } + return &m, err +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { - return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) + return IdentityMapping{}, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { - return nil, err + return IdentityMapping{}, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { - return nil, err + return IdentityMapping{}, err } - return &IdentityMapping{ - uids: subuidRanges, - gids: subgidRanges, + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, }, nil } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 35ede0ff..0f5aadd4 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -6,6 +6,15 @@ import ( "github.com/docker/docker/pkg/system" ) +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + // This is currently a wrapper around MkdirAll, however, since currently // permissions aren't set through this path, the identity isn't utilized. // Ownership is handled elsewhere, but in the future could be support here diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index bf7ae056..3ad9255d 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -88,7 +88,6 @@ func addUser(name string) error { } func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created // by the distro tooling ranges, err := parseSubuid(name) diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 87514b64..c1cfa62f 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -29,11 +29,12 @@ var ( // and releases new byte slices to adjust to current needs, so the buffer // won't be overgrown after peak loads. type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. + readBlock bool // check read BytesPipe is Wait() or not } // NewBytesPipe creates new BytesPipe, initialized by specified slice. @@ -50,12 +51,12 @@ func NewBytesPipe() *BytesPipe { // It can allocate new []byte slices in a process of writing. 
func (bp *BytesPipe) Write(p []byte) (int, error) { bp.mu.Lock() + defer bp.mu.Unlock() written := 0 loop0: for { if bp.closeErr != nil { - bp.mu.Unlock() return written, ErrClosed } @@ -72,7 +73,6 @@ loop0: // errBufferFull is an error we expect to get if the buffer is full if err != nil && err != errBufferFull { bp.wait.Broadcast() - bp.mu.Unlock() return written, err } @@ -86,6 +86,9 @@ loop0: // make sure the buffer doesn't grow too big from this write for bp.bufLen >= blockThreshold { + if bp.readBlock { + bp.wait.Broadcast() + } bp.wait.Wait() if bp.closeErr != nil { continue loop0 @@ -100,7 +103,6 @@ loop0: bp.buf = append(bp.buf, getBuffer(nextCap)) } bp.wait.Broadcast() - bp.mu.Unlock() return written, nil } @@ -126,17 +128,16 @@ func (bp *BytesPipe) Close() error { // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() + defer bp.mu.Unlock() if bp.bufLen == 0 { if bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err + return 0, bp.closeErr } + bp.readBlock = true bp.wait.Wait() + bp.readBlock = false if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err + return 0, bp.closeErr } } @@ -161,7 +162,6 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { } bp.wait.Broadcast() - bp.mu.Unlock() return } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index 1f657bd3..de00b95e 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -2,9 +2,12 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" - "crypto/sha256" - "encoding/hex" "io" + + // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered + // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. + _ "crypto/sha256" + _ "crypto/sha512" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -49,15 +52,6 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - // OnEOFReader wraps an io.ReadCloser and a function // the function will run at the end of file or close the file. type OnEOFReader struct { diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go deleted file mode 100644 index 4ba8fe35..00000000 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ /dev/null @@ -1,19 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. 
-func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 00000000..ce5990c9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,19 @@ +package system + +import ( + "os" + "path/filepath" + "strings" +) + +// IsAbs is a platform-agnostic wrapper for filepath.IsAbs. +// +// On Windows, golang filepath.IsAbs does not consider a path \windows\system32 +// as absolute as it doesn't start with a drive-letter/colon combination. However, +// in docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go new file mode 100644 index 00000000..b2ee0063 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + + "github.com/moby/sys/sequential" +) + +// CreateSequential is deprecated. +// +// Deprecated: use os.Create or github.com/moby/sys/sequential.Create() +func CreateSequential(name string) (*os.File, error) { + return sequential.Create(name) +} + +// OpenSequential is deprecated. +// +// Deprecated: use os.Open or github.com/moby/sys/sequential.Open +func OpenSequential(name string) (*os.File, error) { + return sequential.Open(name) +} + +// OpenFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.OpenFile() +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return sequential.OpenFile(name, flag, perm) +} + +// TempFileSequential is deprecated. +// +// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return sequential.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go index 8b991201..38011294 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -3,10 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "os" - "path/filepath" -) +import "os" // MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { @@ -18,50 +15,3 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. 
- -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return os.CreateTemp(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 8f79dc8f..e3fa9f73 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -2,13 +2,8 @@ package system // import "github.com/docker/docker/pkg/system" import ( "os" - "path/filepath" "regexp" - "strconv" - "strings" - "sync" "syscall" - "time" "unsafe" "golang.org/x/sys/windows" @@ -121,172 +116,3 @@ func mkdirWithACL(name string, sddl string) error { } return nil } - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) { - return true - } - return false -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. 
- -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for TempFileSequential -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFileSequential is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go new file mode 100644 index 00000000..e3de86be --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/image_os.go @@ -0,0 +1,10 @@ +package system // import "github.com/docker/docker/pkg/system" +import ( + "runtime" + "strings" +) + +// IsOSSupported determines if an operating system is supported by the host. +func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index a91288c6..3c2a43dd 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,29 +1,18 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "os" - - "github.com/sirupsen/logrus" -) - var ( - // containerdRuntimeSupported determines if ContainerD should be the runtime. - // As of March 2019, this is an experimental feature. + // containerdRuntimeSupported determines if containerd should be the runtime. containerdRuntimeSupported = false ) -// InitContainerdRuntime sets whether to use ContainerD for runtime -// on Windows. This is an experimental feature still in development, and -// also requires an environment variable to be set (so as not to turn the -// feature on from simply experimental which would also mean LCOW. -func InitContainerdRuntime(experimental bool, cdPath string) { - if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { - logrus.Warnf("Using ContainerD runtime. 
This feature is experimental") +// InitContainerdRuntime sets whether to use containerd for runtime on Windows. +func InitContainerdRuntime(cdPath string) { + if len(cdPath) > 0 { containerdRuntimeSupported = true } } -// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. +// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported. func ContainerdRuntimeSupported() bool { return containerdRuntimeSupported } diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go deleted file mode 100644 index 4599a3f2..00000000 --- a/vendor/github.com/docker/docker/pkg/system/lcow.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build windows && !no_lcow -// +build windows,!no_lcow - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "strings" - - "github.com/Microsoft/hcsshim/osversion" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - // lcowSupported determines if Linux Containers on Windows are supported. - lcowSupported = false -) - -// InitLCOW sets whether LCOW is supported or not. Requires RS5+ -func InitLCOW(experimental bool) { - if experimental && osversion.Build() >= osversion.RS5 { - lcowSupported = true - } -} - -func LCOWSupported() bool { - return lcowSupported -} - -// ValidatePlatform determines if a platform structure is valid. -// TODO This is a temporary windows-only function, should be replaced by -// comparison of worker capabilities -func ValidatePlatform(platform specs.Platform) error { - if !IsOSSupported(platform.OS) { - return errors.Errorf("unsupported os %s", platform.OS) - } - return nil -} - -// IsOSSupported determines if an operating system is supported by the host -func IsOSSupported(os string) bool { - if strings.EqualFold("windows", os) { - return true - } - if LCOWSupported() && strings.EqualFold(os, "linux") { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go deleted file mode 100644 index daadef31..00000000 --- a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build !windows || (windows && no_lcow) -// +build !windows windows,no_lcow - -package system // import "github.com/docker/docker/pkg/system" -import ( - "runtime" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// InitLCOW does nothing since LCOW is a windows only feature -func InitLCOW(_ bool) {} - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return false -} - -// ValidatePlatform determines if a platform structure is valid. This function -// is used for LCOW, and is a no-op on non-windows platforms. -func ValidatePlatform(_ specs.Platform) error { - return nil -} - -// IsOSSupported determines if an operating system is supported by the host. 
-func IsOSSupported(os string) bool { - return strings.EqualFold(runtime.GOOS, os) -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index cd060eff..02a7377c 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -6,8 +6,6 @@ import ( "os" "strconv" "strings" - - units "github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a @@ -42,7 +40,8 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { if err != nil { continue } - bytes := int64(size) * units.KiB + // Convert to KiB + bytes := int64(size) * 1024 switch parts[0] { case "MemTotal:": @@ -56,7 +55,6 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) { case "SwapFree:": meminfo.SwapFree = bytes } - } if memAvailable != -1 { meminfo.MemFree = memAvailable diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go index 64e89228..818b20ef 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -1,28 +1,18 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "fmt" - "path/filepath" - "runtime" - "strings" -) - const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" // DefaultPathEnv is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . +// For Windows containers, an empty string is returned as the default +// path will be set by the container, and Docker has no context of what the +// default path should be. func DefaultPathEnv(os string) string { - if runtime.GOOS == "windows" { - if os != runtime.GOOS { - return defaultUnixPathEnv - } - // Deliberately empty on Windows containers on Windows as the default path will be set by - // the container. Docker has no context of what the default path should be. 
+ if os == "windows" { return "" } return defaultUnixPathEnv - } // PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter @@ -47,18 +37,5 @@ type PathVerifier interface { // /a --> \a // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { - if runtime.GOOS != "windows" || LCOWSupported() { - return path, nil - } - - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !driver.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil + return checkSystemDriveAndRemoveDriveLetter(path, driver) } diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go index 2c85371b..197a37a2 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -9,3 +9,9 @@ package system // import "github.com/docker/docker/pkg/system" func GetLongPathName(path string) (string, error) { return path, nil } + +// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation +// of CheckSystemDriveAndRemoveDriveLetter +func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go index 22a56136..7d375b0d 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -1,6 +1,12 @@ package system // import "github.com/docker/docker/pkg/system" -import "golang.org/x/sys/windows" +import ( + "fmt" + "path/filepath" + "strings" + + "golang.org/x/sys/windows" +) // GetLongPathName converts Windows short pathnames to full pathnames. // For example C:\Users\ADMIN~1 --> C:\Users\Administrator. @@ -25,3 +31,18 @@ func GetLongPathName(path string) (string, error) { } return windows.UTF16ToString(b), nil } + +// checkSystemDriveAndRemoveDriveLetter is the Windows implementation +// of CheckSystemDriveAndRemoveDriveLetter +func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go index d2ab9c3d..1c2c6a30 100644 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -33,6 +33,7 @@ func IsProcessZombie(pid int) (bool, error) { statPath := fmt.Sprintf("/proc/%d/stat", pid) dataBytes, err := os.ReadFile(statPath) if err != nil { + // TODO(thaJeztah) should we ignore os.IsNotExist() here? 
("/proc//stat" will be gone if the process exited) return false, err } data := string(dataBytes) diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go deleted file mode 100644 index f2d81597..00000000 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ /dev/null @@ -1,79 +0,0 @@ -//go:build !darwin && !windows -// +build !darwin,!windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "time" - - "github.com/moby/sys/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. -// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 50 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return nil - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if e := mount.Unmount(pe.Path); e != nil { - return errors.Wrapf(e, "error while removing %s", dir) - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/rm_windows.go b/vendor/github.com/docker/docker/pkg/system/rm_windows.go deleted file mode 100644 index ed9c5dcb..00000000 --- a/vendor/github.com/docker/docker/pkg/system/rm_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -import "os" - -// EnsureRemoveAll is an alias to os.RemoveAll on Windows -var EnsureRemoveAll = os.RemoveAll diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index b2456cb8..0ff3af2f 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -20,12 +20,12 @@ func (s StatT) Size() int64 { // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) + return s.mode } // Mtim returns file's last modification time. 
func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) + return s.mtim } // Stat takes a path to a file and returns diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 7c90bffa..00000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build linux || freebsd -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import "golang.org/x/sys/unix" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return unix.Unmount(dest, 0) -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index 1588aa3e..00000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,136 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "github.com/Microsoft/hcsshim/osversion" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -const ( - OWNER_SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.OWNER_SECURITY_INFORMATION - GROUP_SECURITY_INFORMATION = windows.GROUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.GROUP_SECURITY_INFORMATION - DACL_SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.DACL_SECURITY_INFORMATION - SACL_SECURITY_INFORMATION = windows.SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SACL_SECURITY_INFORMATION - LABEL_SECURITY_INFORMATION = windows.LABEL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.LABEL_SECURITY_INFORMATION - ATTRIBUTE_SECURITY_INFORMATION = windows.ATTRIBUTE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.ATTRIBUTE_SECURITY_INFORMATION - SCOPE_SECURITY_INFORMATION = windows.SCOPE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SCOPE_SECURITY_INFORMATION - PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 - ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 - BACKUP_SECURITY_INFORMATION = windows.BACKUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.BACKUP_SECURITY_INFORMATION - PROTECTED_DACL_SECURITY_INFORMATION = windows.PROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_DACL_SECURITY_INFORMATION - PROTECTED_SACL_SECURITY_INFORMATION = windows.PROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_SACL_SECURITY_INFORMATION - UNPROTECTED_DACL_SECURITY_INFORMATION = windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_DACL_SECURITY_INFORMATION - UNPROTECTED_SACL_SECURITY_INFORMATION = windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_SACL_SECURITY_INFORMATION -) - -const ( - SE_UNKNOWN_OBJECT_TYPE = windows.SE_UNKNOWN_OBJECT_TYPE // Deprecated: use golang.org/x/sys/windows.SE_UNKNOWN_OBJECT_TYPE - SE_FILE_OBJECT = windows.SE_FILE_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_FILE_OBJECT - SE_SERVICE = windows.SE_SERVICE // Deprecated: use golang.org/x/sys/windows.SE_SERVICE - SE_PRINTER = windows.SE_PRINTER // Deprecated: use golang.org/x/sys/windows.SE_PRINTER - SE_REGISTRY_KEY = 
windows.SE_REGISTRY_KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_KEY - SE_LMSHARE = windows.SE_LMSHARE // Deprecated: use golang.org/x/sys/windows.SE_LMSHARE - SE_KERNEL_OBJECT = windows.SE_KERNEL_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_KERNEL_OBJECT - SE_WINDOW_OBJECT = windows.SE_WINDOW_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WINDOW_OBJECT - SE_DS_OBJECT = windows.SE_DS_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT - SE_DS_OBJECT_ALL = windows.SE_DS_OBJECT_ALL // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT_ALL - SE_PROVIDER_DEFINED_OBJECT = windows.SE_PROVIDER_DEFINED_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_PROVIDER_DEFINED_OBJECT - SE_WMIGUID_OBJECT = windows.SE_WMIGUID_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WMIGUID_OBJECT - SE_REGISTRY_WOW64_32KEY = windows.SE_REGISTRY_WOW64_32KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_WOW64_32KEY -) - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - ContainerUserSidString = "S-1-5-93-2-2" -) - -var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion = osversion.OSVersion - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported) -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// dockerd.exe must be manifested to get the correct version information. -// Deprecated: use github.com/Microsoft/hcsshim/osversion.Get() instead -func GetOSVersion() OSVersion { - return osversion.Get() -} - -// IsWindowsClient returns true if the SKU is client -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(_ string) error { - return nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. 
- return ntuserApiset.Load() == nil -} - -// Deprecated: use golang.org/x/sys/windows.SetNamedSecurityInfo() -func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - if r0 != 0 { - result = syscall.Errno(r0) - } - return -} - -// Deprecated: uses golang.org/x/sys/windows.SecurityDescriptorFromString() and golang.org/x/sys/windows.SECURITY_DESCRIPTOR.DACL() -func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) - if r1 == 0 { - if e1 != 0 { - result = e1 - } else { - result = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index d4a15cbe..00000000 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index fc62388c..00000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/README.md b/vendor/github.com/docker/libnetwork/resolvconf/README.md deleted file mode 100644 index cdda554b..00000000 --- a/vendor/github.com/docker/libnetwork/resolvconf/README.md +++ /dev/null @@ -1 +0,0 @@ -Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go deleted file mode 100644 index e348bc57..00000000 --- a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go +++ /dev/null @@ -1,26 +0,0 @@ -package dns - -import ( - "regexp" -) - -// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. -const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` - -// IPv4Localhost is a regex pattern for IPv4 localhost address range. -const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})` - -var localhostIPRegexp = regexp.MustCompile(IPLocalhost) -var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost) - -// IsLocalhost returns true if ip matches the localhost IP regular expression. 
-// Used for determining if nameserver settings are being passed which are -// localhost addresses -func IsLocalhost(ip string) bool { - return localhostIPRegexp.MatchString(ip) -} - -// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression. -func IsIPv4Localhost(ip string) bool { - return localhostIPv4Regexp.MatchString(ip) -} diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go deleted file mode 100644 index 42da71be..00000000 --- a/vendor/github.com/docker/libnetwork/types/types.go +++ /dev/null @@ -1,649 +0,0 @@ -// Package types contains types that are common across libnetwork project -package types - -import ( - "bytes" - "fmt" - "net" - "strconv" - "strings" - - "github.com/ishidawataru/sctp" -) - -// constants for the IP address type -const ( - IP = iota // IPv4 and IPv6 - IPv4 - IPv6 -) - -// EncryptionKey is the libnetwork representation of the key distributed by the lead -// manager. -type EncryptionKey struct { - Subsystem string - Algorithm int32 - Key []byte - LamportTime uint64 -} - -// UUID represents a globally unique ID of various resources like network and endpoint -type UUID string - -// QosPolicy represents a quality of service policy on an endpoint -type QosPolicy struct { - MaxEgressBandwidth uint64 -} - -// TransportPort represents a local Layer 4 endpoint -type TransportPort struct { - Proto Protocol - Port uint16 -} - -// Equal checks if this instance of Transportport is equal to the passed one -func (t *TransportPort) Equal(o *TransportPort) bool { - if t == o { - return true - } - - if o == nil { - return false - } - - if t.Proto != o.Proto || t.Port != o.Port { - return false - } - - return true -} - -// GetCopy returns a copy of this TransportPort structure instance -func (t *TransportPort) GetCopy() TransportPort { - return TransportPort{Proto: t.Proto, Port: t.Port} -} - -// String returns the TransportPort structure in string form -func (t *TransportPort) String() string { - return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port) -} - -// FromString reads the TransportPort structure from string -func (t *TransportPort) FromString(s string) error { - ps := strings.Split(s, "/") - if len(ps) == 2 { - t.Proto = ParseProtocol(ps[0]) - if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil { - t.Port = uint16(p) - return nil - } - } - return BadRequestErrorf("invalid format for transport port: %s", s) -} - -// PortBinding represents a port binding between the container and the host -type PortBinding struct { - Proto Protocol - IP net.IP - Port uint16 - HostIP net.IP - HostPort uint16 - HostPortEnd uint16 -} - -// HostAddr returns the host side transport address -func (p PortBinding) HostAddr() (net.Addr, error) { - switch p.Proto { - case UDP: - return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil - case TCP: - return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil - case SCTP: - return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil - default: - return nil, ErrInvalidProtocolBinding(p.Proto.String()) - } -} - -// ContainerAddr returns the container side transport address -func (p PortBinding) ContainerAddr() (net.Addr, error) { - switch p.Proto { - case UDP: - return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil - case TCP: - return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil - case SCTP: - return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil - default: - return nil, 
ErrInvalidProtocolBinding(p.Proto.String()) - } -} - -// GetCopy returns a copy of this PortBinding structure instance -func (p *PortBinding) GetCopy() PortBinding { - return PortBinding{ - Proto: p.Proto, - IP: GetIPCopy(p.IP), - Port: p.Port, - HostIP: GetIPCopy(p.HostIP), - HostPort: p.HostPort, - HostPortEnd: p.HostPortEnd, - } -} - -// String returns the PortBinding structure in string form -func (p *PortBinding) String() string { - ret := fmt.Sprintf("%s/", p.Proto) - if p.IP != nil { - ret += p.IP.String() - } - ret = fmt.Sprintf("%s:%d/", ret, p.Port) - if p.HostIP != nil { - ret += p.HostIP.String() - } - ret = fmt.Sprintf("%s:%d", ret, p.HostPort) - return ret -} - -// FromString reads the PortBinding structure from string s. -// String s is a triple of "protocol/containerIP:port/hostIP:port" -// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form. -// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported. -// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString -// returns an error. -func (p *PortBinding) FromString(s string) error { - ps := strings.Split(s, "/") - if len(ps) != 3 { - return BadRequestErrorf("invalid format for port binding: %s", s) - } - - p.Proto = ParseProtocol(ps[0]) - - var err error - if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil { - return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error()) - } - - if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil { - return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error()) - } - - return nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - hoststr, portstr, err := net.SplitHostPort(s) - if err != nil { - return nil, 0, err - } - - ip := net.ParseIP(hoststr) - if ip == nil { - return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr) - } - - port, err := strconv.ParseUint(portstr, 10, 16) - if err != nil { - return nil, 0, BadRequestErrorf("invalid port: %s", portstr) - } - - return ip, uint16(port), nil -} - -// Equal checks if this instance of PortBinding is equal to the passed one -func (p *PortBinding) Equal(o *PortBinding) bool { - if p == o { - return true - } - - if o == nil { - return false - } - - if p.Proto != o.Proto || p.Port != o.Port || - p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { - return false - } - - if p.IP != nil { - if !p.IP.Equal(o.IP) { - return false - } - } else { - if o.IP != nil { - return false - } - } - - if p.HostIP != nil { - if !p.HostIP.Equal(o.HostIP) { - return false - } - } else { - if o.HostIP != nil { - return false - } - } - - return true -} - -// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. 
-type ErrInvalidProtocolBinding string - -func (ipb ErrInvalidProtocolBinding) Error() string { - return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) -} - -const ( - // ICMP is for the ICMP ip protocol - ICMP = 1 - // TCP is for the TCP ip protocol - TCP = 6 - // UDP is for the UDP ip protocol - UDP = 17 - // SCTP is for the SCTP ip protocol - SCTP = 132 -) - -// Protocol represents an IP protocol number -type Protocol uint8 - -func (p Protocol) String() string { - switch p { - case ICMP: - return "icmp" - case TCP: - return "tcp" - case UDP: - return "udp" - case SCTP: - return "sctp" - default: - return fmt.Sprintf("%d", p) - } -} - -// ParseProtocol returns the respective Protocol type for the passed string -func ParseProtocol(s string) Protocol { - switch strings.ToLower(s) { - case "icmp": - return ICMP - case "udp": - return UDP - case "tcp": - return TCP - case "sctp": - return SCTP - default: - return 0 - } -} - -// GetMacCopy returns a copy of the passed MAC address -func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { - if from == nil { - return nil - } - to := make(net.HardwareAddr, len(from)) - copy(to, from) - return to -} - -// GetIPCopy returns a copy of the passed IP address -func GetIPCopy(from net.IP) net.IP { - if from == nil { - return nil - } - to := make(net.IP, len(from)) - copy(to, from) - return to -} - -// GetIPNetCopy returns a copy of the passed IP Network -func GetIPNetCopy(from *net.IPNet) *net.IPNet { - if from == nil { - return nil - } - bm := make(net.IPMask, len(from.Mask)) - copy(bm, from.Mask) - return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} -} - -// GetIPNetCanonical returns the canonical form for the passed network -func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { - if nw == nil { - return nil - } - c := GetIPNetCopy(nw) - c.IP = c.IP.Mask(nw.Mask) - return c -} - -// CompareIPNet returns equal if the two IP Networks are equal -func CompareIPNet(a, b *net.IPNet) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) -} - -// GetMinimalIP returns the address in its shortest form -// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. -// Otherwise ip is returned unchanged. -func GetMinimalIP(ip net.IP) net.IP { - if ip != nil && ip.To4() != nil { - return ip.To4() - } - return ip -} - -// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation -func GetMinimalIPNet(nw *net.IPNet) *net.IPNet { - if nw == nil { - return nil - } - if len(nw.IP) == 16 && nw.IP.To4() != nil { - m := nw.Mask - if len(m) == 16 { - m = m[12:16] - } - return &net.IPNet{IP: nw.IP.To4(), Mask: m} - } - return nw -} - -// IsIPNetValid returns true if the ipnet is a valid network/mask -// combination. Otherwise returns false. -func IsIPNetValid(nw *net.IPNet) bool { - return nw.String() != "0.0.0.0/0" -} - -var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - -// compareIPMask checks if the passed ip and mask are semantically compatible. -// It returns the byte indexes for the address and mask so that caller can -// do bitwise operations without modifying address representation. 
-func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { - // Find the effective starting of address and mask - if len(ip) == net.IPv6len && ip.To4() != nil { - is = 12 - } - if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { - ms = 12 - } - // Check if address and mask are semantically compatible - if len(ip[is:]) != len(mask[ms:]) { - err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) - } - return -} - -// GetHostPartIP returns the host portion of the ip address identified by the mask. -// IP address representation is not modified. If address and mask are not compatible -// an error is returned. -func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { - // Find the effective starting of address and mask - is, ms, err := compareIPMask(ip, mask) - if err != nil { - return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) - } - - // Compute host portion - out := GetIPCopy(ip) - for i := 0; i < len(mask[ms:]); i++ { - out[is+i] &= ^mask[ms+i] - } - - return out, nil -} - -// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). -// IP address representation is not modified. If address and mask are not compatible -// an error is returned. -func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { - // Find the effective starting of address and mask - is, ms, err := compareIPMask(ip, mask) - if err != nil { - return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) - } - - // Compute broadcast address - out := GetIPCopy(ip) - for i := 0; i < len(mask[ms:]); i++ { - out[is+i] |= ^mask[ms+i] - } - - return out, nil -} - -// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation -func ParseCIDR(cidr string) (n *net.IPNet, e error) { - var i net.IP - if i, n, e = net.ParseCIDR(cidr); e == nil { - n.IP = i - } - return -} - -const ( - // NEXTHOP indicates a StaticRoute with an IP next hop. - NEXTHOP = iota - - // CONNECTED indicates a StaticRoute with an interface for directly connected peers. - CONNECTED -) - -// StaticRoute is a statically-provisioned IP route. -type StaticRoute struct { - Destination *net.IPNet - - RouteType int // NEXT_HOP or CONNECTED - - // NextHop will be resolved by the kernel (i.e. as a loose hop). 
- NextHop net.IP -} - -// GetCopy returns a copy of this StaticRoute structure -func (r *StaticRoute) GetCopy() *StaticRoute { - d := GetIPNetCopy(r.Destination) - nh := GetIPCopy(r.NextHop) - return &StaticRoute{Destination: d, - RouteType: r.RouteType, - NextHop: nh, - } -} - -// InterfaceStatistics represents the interface's statistics -type InterfaceStatistics struct { - RxBytes uint64 - RxPackets uint64 - RxErrors uint64 - RxDropped uint64 - TxBytes uint64 - TxPackets uint64 - TxErrors uint64 - TxDropped uint64 -} - -func (is *InterfaceStatistics) String() string { - return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d", - is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped) -} - -/****************************** - * Well-known Error Interfaces - ******************************/ - -// MaskableError is an interface for errors which can be ignored by caller -type MaskableError interface { - // Maskable makes implementer into MaskableError type - Maskable() -} - -// RetryError is an interface for errors which might get resolved through retry -type RetryError interface { - // Retry makes implementer into RetryError type - Retry() -} - -// BadRequestError is an interface for errors originated by a bad request -type BadRequestError interface { - // BadRequest makes implementer into BadRequestError type - BadRequest() -} - -// NotFoundError is an interface for errors raised because a needed resource is not available -type NotFoundError interface { - // NotFound makes implementer into NotFoundError type - NotFound() -} - -// ForbiddenError is an interface for errors which denote a valid request that cannot be honored -type ForbiddenError interface { - // Forbidden makes implementer into ForbiddenError type - Forbidden() -} - -// NoServiceError is an interface for errors returned when the required service is not available -type NoServiceError interface { - // NoService makes implementer into NoServiceError type - NoService() -} - -// TimeoutError is an interface for errors raised because of timeout -type TimeoutError interface { - // Timeout makes implementer into TimeoutError type - Timeout() -} - -// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented -type NotImplementedError interface { - // NotImplemented makes implementer into NotImplementedError type - NotImplemented() -} - -// InternalError is an interface for errors raised because of an internal error -type InternalError interface { - // Internal makes implementer into InternalError type - Internal() -} - -/****************************** - * Well-known Error Formatters - ******************************/ - -// BadRequestErrorf creates an instance of BadRequestError -func BadRequestErrorf(format string, params ...interface{}) error { - return badRequest(fmt.Sprintf(format, params...)) -} - -// NotFoundErrorf creates an instance of NotFoundError -func NotFoundErrorf(format string, params ...interface{}) error { - return notFound(fmt.Sprintf(format, params...)) -} - -// ForbiddenErrorf creates an instance of ForbiddenError -func ForbiddenErrorf(format string, params ...interface{}) error { - return forbidden(fmt.Sprintf(format, params...)) -} - -// NoServiceErrorf creates an instance of NoServiceError -func NoServiceErrorf(format string, params ...interface{}) error { - return noService(fmt.Sprintf(format, params...)) -} - -// NotImplementedErrorf 
creates an instance of NotImplementedError -func NotImplementedErrorf(format string, params ...interface{}) error { - return notImpl(fmt.Sprintf(format, params...)) -} - -// TimeoutErrorf creates an instance of TimeoutError -func TimeoutErrorf(format string, params ...interface{}) error { - return timeout(fmt.Sprintf(format, params...)) -} - -// InternalErrorf creates an instance of InternalError -func InternalErrorf(format string, params ...interface{}) error { - return internal(fmt.Sprintf(format, params...)) -} - -// InternalMaskableErrorf creates an instance of InternalError and MaskableError -func InternalMaskableErrorf(format string, params ...interface{}) error { - return maskInternal(fmt.Sprintf(format, params...)) -} - -// RetryErrorf creates an instance of RetryError -func RetryErrorf(format string, params ...interface{}) error { - return retry(fmt.Sprintf(format, params...)) -} - -/*********************** - * Internal Error Types - ***********************/ -type badRequest string - -func (br badRequest) Error() string { - return string(br) -} -func (br badRequest) BadRequest() {} - -type notFound string - -func (nf notFound) Error() string { - return string(nf) -} -func (nf notFound) NotFound() {} - -type forbidden string - -func (frb forbidden) Error() string { - return string(frb) -} -func (frb forbidden) Forbidden() {} - -type noService string - -func (ns noService) Error() string { - return string(ns) -} -func (ns noService) NoService() {} - -type timeout string - -func (to timeout) Error() string { - return string(to) -} -func (to timeout) Timeout() {} - -type notImpl string - -func (ni notImpl) Error() string { - return string(ni) -} -func (ni notImpl) NotImplemented() {} - -type internal string - -func (nt internal) Error() string { - return string(nt) -} -func (nt internal) Internal() {} - -type maskInternal string - -func (mnt maskInternal) Error() string { - return string(mnt) -} -func (mnt maskInternal) Internal() {} -func (mnt maskInternal) Maskable() {} - -type retry string - -func (r retry) Error() string { - return string(r) -} -func (r retry) Retry() {} diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 5152bf59..be82827c 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -7,7 +7,6 @@ suits you. ![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) - ## Install ```bash @@ -124,17 +123,17 @@ fmt.Println("All text will now be bold magenta.") ``` ### Disable/Enable color - + There might be a case where you want to explicitly disable/enable color output. the `go-isatty` package will automatically disable color output for non-tty output streams (for example if the output were piped directly to `less`). The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment -variable is set (regardless of its value). +variable is set to a non-empty string. -`Color` has support to disable/enable colors programatically both globally and +`Color` has support to disable/enable colors programmatically both globally and for single color definitions. For example suppose you have a CLI app and a -`--no-color` bool flag. You can easily disable the color output with: +`-no-color` bool flag. 
You can easily disable the color output with: ```go var flagNoColor = flag.Bool("no-color", false, "Disable color output") @@ -167,11 +166,10 @@ To output color in GitHub Actions (or other CI systems that support ANSI colors) * Save/Return previous values * Evaluate fmt.Formatter interface - ## Credits - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) ## License diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 98a60f3c..889f9e77 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -19,10 +19,10 @@ var ( // set (regardless of its value). This is a global option and affects all // colors. For more control over each color block use the methods // DisableColor() individually. - NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - // Output defines the standard output of the print functions. By default + // Output defines the standard output of the print functions. By default, // os.Stdout is used. Output = colorable.NewColorableStdout() @@ -35,10 +35,9 @@ var ( colorsCacheMu sync.Mutex // protects colorsCache ) -// noColorExists returns true if the environment variable NO_COLOR exists. -func noColorExists() bool { - _, exists := os.LookupEnv("NO_COLOR") - return exists +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" } // Color defines a custom color object which is defined by SGR parameters. @@ -120,7 +119,7 @@ func New(value ...Attribute) *Color { params: make([]Attribute, 0), } - if noColorExists() { + if noColorIsSet() { c.noColor = boolPtr(true) } @@ -152,7 +151,7 @@ func (c *Color) Set() *Color { return c } - fmt.Fprintf(Output, c.format()) + fmt.Fprint(Output, c.format()) return c } @@ -164,16 +163,21 @@ func (c *Color) unset() { Unset() } -func (c *Color) setWriter(w io.Writer) *Color { +// SetWriter is used to set the SGR sequence with the given io.Writer. This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { if c.isNoColorSet() { return c } - fmt.Fprintf(w, c.format()) + fmt.Fprint(w, c.format()) return c } -func (c *Color) unsetWriter(w io.Writer) { +// UnsetWriter resets all escape attributes and clears the output with the give +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { if c.isNoColorSet() { return } @@ -192,20 +196,14 @@ func (c *Color) Add(value ...Attribute) *Color { return c } -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - // Fprint formats using the default formats for its operands and writes to w. // Spaces are added between operands when neither is a string. // It returns the number of bytes written and any write error encountered. // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. 
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprint(w, a...) } @@ -227,8 +225,8 @@ func (c *Color) Print(a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprintf(w, format, a...) } @@ -248,8 +246,8 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprintln(w, a...) } @@ -396,7 +394,7 @@ func (c *Color) DisableColor() { } // EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. +// DisableColor(). Otherwise, this method has no side effects. func (c *Color) EnableColor() { c.noColor = boolPtr(false) } diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 00000000..be01c558 --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. + // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index 04541de7..9491ad54 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -5,106 +5,105 @@ that suits you. Use simple and default helper functions with predefined foreground colors: - color.Cyan("Prints text in cyan.") + color.Cyan("Prints text in cyan.") - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") -However there are times where custom color mixes are required. Below are some +However, there are times when custom color mixes are required. Below are some examples to create custom color objects and use the print functions of each separate color object. 
- // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) + // Mix up foreground and background colors, create new mixes! + red := color.New(color.FgRed) - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") You can create PrintXxx functions to simplify even more: - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") You can also FprintXxx functions to pass your own io.Writer: - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") Or create SprintXxx functions to mix strings with other non-colorized strings: - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) Windows support is enabled by default. All Print functions work as intended. 
-However only for color.SprintXXX functions, user should use fmt.FprintXXX and +However, only for color.SprintXXX functions, user should use fmt.FprintXXX and set the output to color.Output: - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) Using with existing code is possible. Just use the Set() method to set the standard output to the given parameters. That way a rewrite of an existing code is not required. - // Use handy standard colors. - color.Set(color.FgYellow) + // Use handy standard colors. + color.Set(color.FgYellow) - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") - color.Unset() // don't forget to unset + color.Unset() // don't forget to unset - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function - fmt.Println("All text will be now bold magenta.") + fmt.Println("All text will be now bold magenta.") There might be a case where you want to disable color output (for example to pipe the standard output of your app to somewhere else). `Color` has support to @@ -112,24 +111,24 @@ disable colors both globally and for single color definition. For example suppose you have a CLI app and a `--no-color` bool flag. You can easily disable the color output with: - var flagNoColor = flag.Bool("no-color", false, "Disable color output") + var flagNoColor = flag.Bool("no-color", false, "Disable color output") - if *flagNoColor { - color.NoColor = true // disables colorized output - } + if *flagNoColor { + color.NoColor = true // disables colorized output + } You can also disable the color by setting the NO_COLOR environment variable to any value. It also has support for single color definitions (local). You can disable/enable color output on the fly: - c := color.New(color.FgCyan) - c.Println("Prints cyan text") + c := color.New(color.FgCyan) + c.Println("Prints cyan text") - c.DisableColor() - c.Println("This is printed without any color") + c.DisableColor() + c.Println("This is printed without any color") - c.EnableColor() - c.Println("This prints again cyan...") + c.EnableColor() + c.Println("This prints again cyan...") */ package color diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile deleted file mode 100644 index 0b4659b7..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Protocol Buffers for Go with Gadgets -# -# Copyright (c) 2013, The GoGo Authors. All rights reserved. -# http://github.com/gogo/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto - -restore: - cp gogo.pb.golden gogo.pb.go - -preserve: - cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go deleted file mode 100644 index 081c86fa..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package gogoproto provides extensions for protocol buffers to achieve: - - - fast marshalling and unmarshalling. - - peace of mind by optionally generating test and benchmark code. - - more canonical Go structures. - - less typing by optionally generating extra helper code. 
- - goprotobuf compatibility - -More Canonical Go Structures - -A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. -You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. -Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. - - - nullable, if false, a field is generated without a pointer (see warning below). - - embed, if true, the field is generated as an embedded field. - - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 - - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. - - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. - - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - -Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. - -Let us look at: - - github.com/gogo/protobuf/test/example/example.proto - -for a quicker overview. - -The following message: - - package test; - - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - - message A { - optional string Description = 1 [(gogoproto.nullable) = false]; - optional int64 Number = 2 [(gogoproto.nullable) = false]; - optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; - } - -Will generate a go struct which looks a lot like this: - - type A struct { - Description string - Number int64 - Id github_com_gogo_protobuf_test_custom.Uuid - } - -You will see there are no pointers, since all fields are non-nullable. -You will also see a custom type which marshals to a string. -Be warned it is your responsibility to test your custom types thoroughly. -You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. - -Next we will embed the message A in message B. - - message B { - optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; - repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; - } - -See below that A is embedded in B. - - type B struct { - A - G []github_com_gogo_protobuf_test_custom.Uint128 - } - -Also see the repeated custom type. - - type Uint128 [2]uint64 - -Next we will create a custom name for one of our fields. 
- - message C { - optional int64 size = 1 [(gogoproto.customname) = "MySize"]; - } - -See below that the field's name is MySize and not Size. - - type C struct { - MySize *int64 - } - -The is useful when having a protocol buffer message with a field name which conflicts with a generated method. -As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. -Using customname you can fix this error without changing the field name. -This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. - -Gogoprotobuf also has some more subtle changes, these could be changed back: - - - the generated package name for imports do not have the extra /filename.pb, - but are actually the imports specified in the .proto file. - -Gogoprotobuf also has lost some features which should be brought back with time: - - - Marshalling and unmarshalling with reflect and without the unsafe package, - this requires work in pointer_reflect.go - -Why does nullable break protocol buffer specifications: - -The protocol buffer specification states, somewhere, that you should be able to tell whether a -field is set or unset. With the option nullable=false this feature is lost, -since your non-nullable fields will always be set. It can be seen as a layer on top of -protocol buffers, where before and after marshalling all non-nullable fields are set -and they cannot be unset. - -Goprotobuf Compatibility: - -Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. -Gogoprotobuf generates the same code as goprotobuf if no extensions are used. -The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: - - - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. - - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix - - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. - - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face - - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. - - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension - - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. - - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). - -Less Typing and Peace of Mind is explained in their specific plugin folders godoc: - - - github.com/gogo/protobuf/plugin/ - -If you do not use any of these extension the code that is generated -will be the same as if goprotobuf has generated it. 
- -The most complete way to see examples is to look at - - github.com/gogo/protobuf/test/thetest.proto - -Gogoprototest is a seperate project, -because we want to keep gogoprotobuf independent of goprotobuf, -but we still want to test it thoroughly. - -*/ -package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go deleted file mode 100644 index 1e91766a..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ /dev/null @@ -1,874 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: gogo.proto - -package gogoproto - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62001, - Name: "gogoproto.goproto_enum_prefix", - Tag: "varint,62001,opt,name=goproto_enum_prefix", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62021, - Name: "gogoproto.goproto_enum_stringer", - Tag: "varint,62021,opt,name=goproto_enum_stringer", - Filename: "gogo.proto", -} - -var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62022, - Name: "gogoproto.enum_stringer", - Tag: "varint,62022,opt,name=enum_stringer", - Filename: "gogo.proto", -} - -var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*string)(nil), - Field: 62023, - Name: "gogoproto.enum_customname", - Tag: "bytes,62023,opt,name=enum_customname", - Filename: "gogo.proto", -} - -var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62024, - Name: "gogoproto.enumdecl", - Tag: "varint,62024,opt,name=enumdecl", - Filename: "gogo.proto", -} - -var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumValueOptions)(nil), - ExtensionType: (*string)(nil), - Field: 66001, - Name: "gogoproto.enumvalue_customname", - Tag: "bytes,66001,opt,name=enumvalue_customname", - Filename: "gogo.proto", -} - -var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63001, - Name: "gogoproto.goproto_getters_all", - Tag: "varint,63001,opt,name=goproto_getters_all", - Filename: "gogo.proto", -} - -var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63002, - Name: "gogoproto.goproto_enum_prefix_all", - Tag: "varint,63002,opt,name=goproto_enum_prefix_all", - Filename: "gogo.proto", -} - -var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63003, - Name: "gogoproto.goproto_stringer_all", - 
Tag: "varint,63003,opt,name=goproto_stringer_all", - Filename: "gogo.proto", -} - -var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63004, - Name: "gogoproto.verbose_equal_all", - Tag: "varint,63004,opt,name=verbose_equal_all", - Filename: "gogo.proto", -} - -var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63005, - Name: "gogoproto.face_all", - Tag: "varint,63005,opt,name=face_all", - Filename: "gogo.proto", -} - -var E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63006, - Name: "gogoproto.gostring_all", - Tag: "varint,63006,opt,name=gostring_all", - Filename: "gogo.proto", -} - -var E_PopulateAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63007, - Name: "gogoproto.populate_all", - Tag: "varint,63007,opt,name=populate_all", - Filename: "gogo.proto", -} - -var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63008, - Name: "gogoproto.stringer_all", - Tag: "varint,63008,opt,name=stringer_all", - Filename: "gogo.proto", -} - -var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63009, - Name: "gogoproto.onlyone_all", - Tag: "varint,63009,opt,name=onlyone_all", - Filename: "gogo.proto", -} - -var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63013, - Name: "gogoproto.equal_all", - Tag: "varint,63013,opt,name=equal_all", - Filename: "gogo.proto", -} - -var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63014, - Name: "gogoproto.description_all", - Tag: "varint,63014,opt,name=description_all", - Filename: "gogo.proto", -} - -var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63015, - Name: "gogoproto.testgen_all", - Tag: "varint,63015,opt,name=testgen_all", - Filename: "gogo.proto", -} - -var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63016, - Name: "gogoproto.benchgen_all", - Tag: "varint,63016,opt,name=benchgen_all", - Filename: "gogo.proto", -} - -var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63017, - Name: "gogoproto.marshaler_all", - Tag: "varint,63017,opt,name=marshaler_all", - Filename: "gogo.proto", -} - -var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63018, - Name: "gogoproto.unmarshaler_all", - Tag: "varint,63018,opt,name=unmarshaler_all", - Filename: "gogo.proto", -} - -var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63019, - Name: "gogoproto.stable_marshaler_all", - Tag: "varint,63019,opt,name=stable_marshaler_all", - Filename: "gogo.proto", -} - -var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63020, - Name: "gogoproto.sizer_all", - Tag: "varint,63020,opt,name=sizer_all", - 
Filename: "gogo.proto", -} - -var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63021, - Name: "gogoproto.goproto_enum_stringer_all", - Tag: "varint,63021,opt,name=goproto_enum_stringer_all", - Filename: "gogo.proto", -} - -var E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63022, - Name: "gogoproto.enum_stringer_all", - Tag: "varint,63022,opt,name=enum_stringer_all", - Filename: "gogo.proto", -} - -var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63023, - Name: "gogoproto.unsafe_marshaler_all", - Tag: "varint,63023,opt,name=unsafe_marshaler_all", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63024, - Name: "gogoproto.unsafe_unmarshaler_all", - Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63025, - Name: "gogoproto.goproto_extensions_map_all", - Tag: "varint,63025,opt,name=goproto_extensions_map_all", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63026, - Name: "gogoproto.goproto_unrecognized_all", - Tag: "varint,63026,opt,name=goproto_unrecognized_all", - Filename: "gogo.proto", -} - -var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63027, - Name: "gogoproto.gogoproto_import", - Tag: "varint,63027,opt,name=gogoproto_import", - Filename: "gogo.proto", -} - -var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63028, - Name: "gogoproto.protosizer_all", - Tag: "varint,63028,opt,name=protosizer_all", - Filename: "gogo.proto", -} - -var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63029, - Name: "gogoproto.compare_all", - Tag: "varint,63029,opt,name=compare_all", - Filename: "gogo.proto", -} - -var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63030, - Name: "gogoproto.typedecl_all", - Tag: "varint,63030,opt,name=typedecl_all", - Filename: "gogo.proto", -} - -var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63031, - Name: "gogoproto.enumdecl_all", - Tag: "varint,63031,opt,name=enumdecl_all", - Filename: "gogo.proto", -} - -var E_GoprotoRegistration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63032, - Name: "gogoproto.goproto_registration", - Tag: "varint,63032,opt,name=goproto_registration", - Filename: "gogo.proto", -} - -var E_MessagenameAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63033, - Name: "gogoproto.messagename_all", - Tag: "varint,63033,opt,name=messagename_all", - Filename: "gogo.proto", -} - -var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ - ExtendedType: 
(*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63034, - Name: "gogoproto.goproto_sizecache_all", - Tag: "varint,63034,opt,name=goproto_sizecache_all", - Filename: "gogo.proto", -} - -var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63035, - Name: "gogoproto.goproto_unkeyed_all", - Tag: "varint,63035,opt,name=goproto_unkeyed_all", - Filename: "gogo.proto", -} - -var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64001, - Name: "gogoproto.goproto_getters", - Tag: "varint,64001,opt,name=goproto_getters", - Filename: "gogo.proto", -} - -var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64003, - Name: "gogoproto.goproto_stringer", - Tag: "varint,64003,opt,name=goproto_stringer", - Filename: "gogo.proto", -} - -var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64004, - Name: "gogoproto.verbose_equal", - Tag: "varint,64004,opt,name=verbose_equal", - Filename: "gogo.proto", -} - -var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64005, - Name: "gogoproto.face", - Tag: "varint,64005,opt,name=face", - Filename: "gogo.proto", -} - -var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64006, - Name: "gogoproto.gostring", - Tag: "varint,64006,opt,name=gostring", - Filename: "gogo.proto", -} - -var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64007, - Name: "gogoproto.populate", - Tag: "varint,64007,opt,name=populate", - Filename: "gogo.proto", -} - -var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 67008, - Name: "gogoproto.stringer", - Tag: "varint,67008,opt,name=stringer", - Filename: "gogo.proto", -} - -var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64009, - Name: "gogoproto.onlyone", - Tag: "varint,64009,opt,name=onlyone", - Filename: "gogo.proto", -} - -var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64013, - Name: "gogoproto.equal", - Tag: "varint,64013,opt,name=equal", - Filename: "gogo.proto", -} - -var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64014, - Name: "gogoproto.description", - Tag: "varint,64014,opt,name=description", - Filename: "gogo.proto", -} - -var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64015, - Name: "gogoproto.testgen", - Tag: "varint,64015,opt,name=testgen", - Filename: "gogo.proto", -} - -var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64016, - Name: "gogoproto.benchgen", - Tag: "varint,64016,opt,name=benchgen", - Filename: "gogo.proto", -} - -var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64017, - Name: 
"gogoproto.marshaler", - Tag: "varint,64017,opt,name=marshaler", - Filename: "gogo.proto", -} - -var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64018, - Name: "gogoproto.unmarshaler", - Tag: "varint,64018,opt,name=unmarshaler", - Filename: "gogo.proto", -} - -var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64019, - Name: "gogoproto.stable_marshaler", - Tag: "varint,64019,opt,name=stable_marshaler", - Filename: "gogo.proto", -} - -var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64020, - Name: "gogoproto.sizer", - Tag: "varint,64020,opt,name=sizer", - Filename: "gogo.proto", -} - -var E_UnsafeMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64023, - Name: "gogoproto.unsafe_marshaler", - Tag: "varint,64023,opt,name=unsafe_marshaler", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64024, - Name: "gogoproto.unsafe_unmarshaler", - Tag: "varint,64024,opt,name=unsafe_unmarshaler", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64025, - Name: "gogoproto.goproto_extensions_map", - Tag: "varint,64025,opt,name=goproto_extensions_map", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64026, - Name: "gogoproto.goproto_unrecognized", - Tag: "varint,64026,opt,name=goproto_unrecognized", - Filename: "gogo.proto", -} - -var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64028, - Name: "gogoproto.protosizer", - Tag: "varint,64028,opt,name=protosizer", - Filename: "gogo.proto", -} - -var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64029, - Name: "gogoproto.compare", - Tag: "varint,64029,opt,name=compare", - Filename: "gogo.proto", -} - -var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64030, - Name: "gogoproto.typedecl", - Tag: "varint,64030,opt,name=typedecl", - Filename: "gogo.proto", -} - -var E_Messagename = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64033, - Name: "gogoproto.messagename", - Tag: "varint,64033,opt,name=messagename", - Filename: "gogo.proto", -} - -var E_GoprotoSizecache = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64034, - Name: "gogoproto.goproto_sizecache", - Tag: "varint,64034,opt,name=goproto_sizecache", - Filename: "gogo.proto", -} - -var E_GoprotoUnkeyed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64035, - Name: "gogoproto.goproto_unkeyed", - Tag: "varint,64035,opt,name=goproto_unkeyed", - Filename: "gogo.proto", -} - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 
65001, - Name: "gogoproto.nullable", - Tag: "varint,65001,opt,name=nullable", - Filename: "gogo.proto", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65002, - Name: "gogoproto.embed", - Tag: "varint,65002,opt,name=embed", - Filename: "gogo.proto", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65003, - Name: "gogoproto.customtype", - Tag: "bytes,65003,opt,name=customtype", - Filename: "gogo.proto", -} - -var E_Customname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65004, - Name: "gogoproto.customname", - Tag: "bytes,65004,opt,name=customname", - Filename: "gogo.proto", -} - -var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65005, - Name: "gogoproto.jsontag", - Tag: "bytes,65005,opt,name=jsontag", - Filename: "gogo.proto", -} - -var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65006, - Name: "gogoproto.moretags", - Tag: "bytes,65006,opt,name=moretags", - Filename: "gogo.proto", -} - -var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65007, - Name: "gogoproto.casttype", - Tag: "bytes,65007,opt,name=casttype", - Filename: "gogo.proto", -} - -var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65008, - Name: "gogoproto.castkey", - Tag: "bytes,65008,opt,name=castkey", - Filename: "gogo.proto", -} - -var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65009, - Name: "gogoproto.castvalue", - Tag: "bytes,65009,opt,name=castvalue", - Filename: "gogo.proto", -} - -var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65010, - Name: "gogoproto.stdtime", - Tag: "varint,65010,opt,name=stdtime", - Filename: "gogo.proto", -} - -var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65011, - Name: "gogoproto.stdduration", - Tag: "varint,65011,opt,name=stdduration", - Filename: "gogo.proto", -} - -var E_Wktpointer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65012, - Name: "gogoproto.wktpointer", - Tag: "varint,65012,opt,name=wktpointer", - Filename: "gogo.proto", -} - -func init() { - proto.RegisterExtension(E_GoprotoEnumPrefix) - proto.RegisterExtension(E_GoprotoEnumStringer) - proto.RegisterExtension(E_EnumStringer) - proto.RegisterExtension(E_EnumCustomname) - proto.RegisterExtension(E_Enumdecl) - proto.RegisterExtension(E_EnumvalueCustomname) - proto.RegisterExtension(E_GoprotoGettersAll) - proto.RegisterExtension(E_GoprotoEnumPrefixAll) - proto.RegisterExtension(E_GoprotoStringerAll) - proto.RegisterExtension(E_VerboseEqualAll) - proto.RegisterExtension(E_FaceAll) - proto.RegisterExtension(E_GostringAll) - proto.RegisterExtension(E_PopulateAll) - proto.RegisterExtension(E_StringerAll) - proto.RegisterExtension(E_OnlyoneAll) - proto.RegisterExtension(E_EqualAll) - proto.RegisterExtension(E_DescriptionAll) - proto.RegisterExtension(E_TestgenAll) - 
proto.RegisterExtension(E_BenchgenAll) - proto.RegisterExtension(E_MarshalerAll) - proto.RegisterExtension(E_UnmarshalerAll) - proto.RegisterExtension(E_StableMarshalerAll) - proto.RegisterExtension(E_SizerAll) - proto.RegisterExtension(E_GoprotoEnumStringerAll) - proto.RegisterExtension(E_EnumStringerAll) - proto.RegisterExtension(E_UnsafeMarshalerAll) - proto.RegisterExtension(E_UnsafeUnmarshalerAll) - proto.RegisterExtension(E_GoprotoExtensionsMapAll) - proto.RegisterExtension(E_GoprotoUnrecognizedAll) - proto.RegisterExtension(E_GogoprotoImport) - proto.RegisterExtension(E_ProtosizerAll) - proto.RegisterExtension(E_CompareAll) - proto.RegisterExtension(E_TypedeclAll) - proto.RegisterExtension(E_EnumdeclAll) - proto.RegisterExtension(E_GoprotoRegistration) - proto.RegisterExtension(E_MessagenameAll) - proto.RegisterExtension(E_GoprotoSizecacheAll) - proto.RegisterExtension(E_GoprotoUnkeyedAll) - proto.RegisterExtension(E_GoprotoGetters) - proto.RegisterExtension(E_GoprotoStringer) - proto.RegisterExtension(E_VerboseEqual) - proto.RegisterExtension(E_Face) - proto.RegisterExtension(E_Gostring) - proto.RegisterExtension(E_Populate) - proto.RegisterExtension(E_Stringer) - proto.RegisterExtension(E_Onlyone) - proto.RegisterExtension(E_Equal) - proto.RegisterExtension(E_Description) - proto.RegisterExtension(E_Testgen) - proto.RegisterExtension(E_Benchgen) - proto.RegisterExtension(E_Marshaler) - proto.RegisterExtension(E_Unmarshaler) - proto.RegisterExtension(E_StableMarshaler) - proto.RegisterExtension(E_Sizer) - proto.RegisterExtension(E_UnsafeMarshaler) - proto.RegisterExtension(E_UnsafeUnmarshaler) - proto.RegisterExtension(E_GoprotoExtensionsMap) - proto.RegisterExtension(E_GoprotoUnrecognized) - proto.RegisterExtension(E_Protosizer) - proto.RegisterExtension(E_Compare) - proto.RegisterExtension(E_Typedecl) - proto.RegisterExtension(E_Messagename) - proto.RegisterExtension(E_GoprotoSizecache) - proto.RegisterExtension(E_GoprotoUnkeyed) - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) - proto.RegisterExtension(E_Customname) - proto.RegisterExtension(E_Jsontag) - proto.RegisterExtension(E_Moretags) - proto.RegisterExtension(E_Casttype) - proto.RegisterExtension(E_Castkey) - proto.RegisterExtension(E_Castvalue) - proto.RegisterExtension(E_Stdtime) - proto.RegisterExtension(E_Stdduration) - proto.RegisterExtension(E_Wktpointer) -} - -func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } - -var fileDescriptor_592445b5231bc2b9 = []byte{ - // 1328 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, - 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, - 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, - 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, - 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, - 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, - 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, - 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, - 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, - 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 
0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, - 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, - 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, - 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, - 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, - 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, - 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, - 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, - 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, - 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, - 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, - 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, - 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, - 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, - 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, - 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, - 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, - 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, - 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, - 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, - 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, - 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, - 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, - 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, - 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, - 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, - 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, - 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, - 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, - 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, - 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, - 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, - 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, - 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, - 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, - 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, - 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 
0x65, 0x0d, 0x85, 0x4a, - 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, - 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, - 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, - 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, - 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, - 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, - 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, - 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, - 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, - 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, - 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, - 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, - 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, - 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, - 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, - 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, - 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, - 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, - 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, - 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, - 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, - 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, - 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, - 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, - 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, - 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, - 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, - 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, - 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, - 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, - 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, - 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, - 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, - 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, - 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, - 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, - 
0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden deleted file mode 100644 index f6502e4b..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-go. -// source: gogo.proto -// DO NOT EDIT! - -package gogoproto - -import proto "github.com/gogo/protobuf/proto" -import json "encoding/json" -import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. -var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51235, - Name: "gogoproto.nullable", - Tag: "varint,51235,opt,name=nullable", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51236, - Name: "gogoproto.embed", - Tag: "varint,51236,opt,name=embed", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 51237, - Name: "gogoproto.customtype", - Tag: "bytes,51237,opt,name=customtype", -} - -func init() { - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto deleted file mode 100644 index b80c8565..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; - -} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go deleted file mode 100644 index 390d4e4b..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ /dev/null @@ -1,415 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gogoproto - -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -import proto "github.com/gogo/protobuf/proto" - -func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Embed, false) -} - -func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Nullable, true) -} - -func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdtime, false) -} - -func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdduration, false) -} - -func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" -} - -func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" -} - -func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" -} - -func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" -} - -func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" -} - -func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" -} - -func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" -} - -func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" -} - -func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" -} - -func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { - return (IsStdTime(field) || IsStdDuration(field) || - IsStdDouble(field) || IsStdFloat(field) || - IsStdInt64(field) || IsStdUInt64(field) || - IsStdInt32(field) || IsStdUInt32(field) || - IsStdBool(field) || - IsStdString(field) || IsStdBytes(field)) -} - -func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) -} - -func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { - nullable := IsNullable(field) - if field.IsMessage() || IsCustomType(field) { - return nullable - } - if proto3 { - return false - } - return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES -} - -func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCustomType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastKey(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastValue(field) - if len(typ) > 0 { - return true - } - return false -} - -func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) -} - -func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) -} - -func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customtype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Casttype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castkey) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castvalue) - if err == nil && 
v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { - name := GetCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { - name := GetEnumCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { - name := GetEnumValueCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Jsontag) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Moretags) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool - -func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) -} - -func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) -} - -func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) -} - -func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) -} - -func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) -} - -func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) -} - -func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) -} - -func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) -} - -func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) -} - -func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) -} - -func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) -} - -func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) -} - -func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) -} - -func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) -} - -func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) -} - -func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) -} - -func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) -} - -func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) -} - -func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) -} - -func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) -} - -func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) -} - -func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) -} - -func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) -} - -func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) -} - -func IsProto3(file *google_protobuf.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) -} - -func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) -} - -func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) -} - -func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) -} - -func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) -} - -func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile deleted file mode 100644 index 3496dc99..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - go install github.com/gogo/protobuf/protoc-gen-gostring - protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto - protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go deleted file mode 100644 index a85bf198..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go +++ /dev/null @@ -1,118 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package descriptor provides functions for obtaining protocol buffer -// descriptors for generated Go types. -// -// These functions cannot go in package proto because they depend on the -// generated protobuf descriptor messages, which themselves depend on proto. 
-package descriptor - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - - "github.com/gogo/protobuf/proto" -) - -// extractFile extracts a FileDescriptorProto from a gzip'd buffer. -func extractFile(gz []byte) (*FileDescriptorProto, error) { - r, err := gzip.NewReader(bytes.NewReader(gz)) - if err != nil { - return nil, fmt.Errorf("failed to open gzip reader: %v", err) - } - defer r.Close() - - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) - } - - fd := new(FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) - } - - return fd, nil -} - -// Message is a proto.Message with a method to return its descriptor. -// -// Message types generated by the protocol compiler always satisfy -// the Message interface. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it -// describing the given message. -func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { - gz, path := msg.Descriptor() - fd, err := extractFile(gz) - if err != nil { - panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) - } - - md = fd.MessageType[path[0]] - for _, i := range path[1:] { - md = md.NestedType[i] - } - return fd, md -} - -// Is this field a scalar numeric type? -func (field *FieldDescriptorProto) IsScalar() bool { - if field.Type == nil { - return false - } - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE, - FieldDescriptorProto_TYPE_FLOAT, - FieldDescriptorProto_TYPE_INT64, - FieldDescriptorProto_TYPE_UINT64, - FieldDescriptorProto_TYPE_INT32, - FieldDescriptorProto_TYPE_FIXED64, - FieldDescriptorProto_TYPE_FIXED32, - FieldDescriptorProto_TYPE_BOOL, - FieldDescriptorProto_TYPE_UINT32, - FieldDescriptorProto_TYPE_ENUM, - FieldDescriptorProto_TYPE_SFIXED32, - FieldDescriptorProto_TYPE_SFIXED64, - FieldDescriptorProto_TYPE_SINT32, - FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go deleted file mode 100644 index 18b2a331..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2865 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: descriptor.proto - -package descriptor - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. 
- FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. 
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. -type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. 
-type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{0} -} -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{1} -} -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. 
-type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2} -} -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m 
!= nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2, 0} -} -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
-type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2, 1} -} -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. 
This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4} -} -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{5} -} -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{6} -} -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{6, 0} -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{7} -} -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{8} -} -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{9} -} -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map<KeyType, ValueType> map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{18} -} -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{18, 0} -} -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{19} -} -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. 
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{19, 0} -} -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{20} -} -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{20, 0} -} -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} - -func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } - -var fileDescriptor_308767df5ffe18af = []byte{ - // 2522 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, - 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, - 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, - 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, - 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, - 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, - 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, - 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, - 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, - 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, - 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, - 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, - 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, - 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, - 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, - 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, - 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, - 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, - 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, - 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, - 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, - 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, - 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, - 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, - 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, - 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, - 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, - 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, - 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, - 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, - 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, - 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, - 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, - 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, - 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, - 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, - 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, - 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, - 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, - 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, - 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, - 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, - 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, - 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, - 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, - 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, - 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, - 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, - 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, - 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, - 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, - 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, - 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, - 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, - 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, - 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, - 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, - 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, - 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, - 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, - 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, - 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, - 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, - 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, - 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, - 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, - 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, - 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, - 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, - 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, - 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, - 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, - 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, - 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, - 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, - 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, - 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, - 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, - 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, - 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, - 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, - 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, - 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, - 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, - 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, - 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, - 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, - 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, - 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, - 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, - 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, - 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, - 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, - 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, - 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, - 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, - 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, - 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, - 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, - 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, - 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, - 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, - 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, - 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, - 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, - 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, - 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, - 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, - 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, - 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, - 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, - 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, - 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, - 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, - 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, - 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, - 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, - 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, - 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, - 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, - 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, - 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, - 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, - 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, - 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, - 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, - 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, - 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, - 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, - 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, - 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, - 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, - 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, - 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, - 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, - 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, - 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, - 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, - 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, - 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, - 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, - 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, - 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, - 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, - 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, - 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, - 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, - 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, - 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, - 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, - 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, - 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, - 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, - 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, - 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, - 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, - 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, - 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go deleted file mode 100644 index 165b2110..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ /dev/null @@ -1,752 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: descriptor.proto - -package descriptor - -import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - math "math" - reflect "reflect" - sort "sort" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (this *FileDescriptorSet) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.FileDescriptorSet{") - if this.File != nil { - s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 16) - s = append(s, "&descriptor.FileDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Package != nil { - s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") - } - if this.Dependency != nil { - s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") - } - if this.PublicDependency != nil { - s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") - } - if this.WeakDependency != nil { - s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") - } - if this.MessageType != nil { - s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.Service != nil { - s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.SourceCodeInfo != nil { - s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") - } - if this.Syntax != nil { - s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.DescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Field != nil { - s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.NestedType != nil { - s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.ExtensionRange != nil { - s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") - } - if this.OneofDecl != nil { - s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != 
nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ExtensionRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.DescriptorProto_ReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ExtensionRangeOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.ExtensionRangeOptions{") - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.FieldDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") - } - if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") - } - if this.TypeName != nil { - s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") - } - if this.Extendee != nil { - s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") - } - if this.DefaultValue != nil { - s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") - } - if this.OneofIndex != nil { - s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") - } - if this.JsonName != nil { - s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.OneofDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.EnumDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Value != nil { - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumValueDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.ServiceDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Method != nil { - s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = 
append(s, "}") - return strings.Join(s, "") -} -func (this *MethodDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&descriptor.MethodDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.InputType != nil { - s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") - } - if this.OutputType != nil { - s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ClientStreaming != nil { - s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") - } - if this.ServerStreaming != nil { - s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 25) - s = append(s, "&descriptor.FileOptions{") - if this.JavaPackage != nil { - s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") - } - if this.JavaOuterClassname != nil { - s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") - } - if this.JavaMultipleFiles != nil { - s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") - } - if this.JavaGenerateEqualsAndHash != nil { - s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") - } - if this.JavaStringCheckUtf8 != nil { - s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") - } - if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") - } - if this.GoPackage != nil { - s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") - } - if this.CcGenericServices != nil { - s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") - } - if this.JavaGenericServices != nil { - s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") - } - if this.PyGenericServices != nil { - s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") - } - if this.PhpGenericServices != nil { - s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.CcEnableArenas != nil { - s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") - } - if this.ObjcClassPrefix != nil { - s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") - } - if this.CsharpNamespace != nil { - s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") - } - if this.SwiftPrefix != nil { - s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") - } - if 
this.PhpClassPrefix != nil { - s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") - } - if this.PhpNamespace != nil { - s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") - } - if this.PhpMetadataNamespace != nil { - s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") - } - if this.RubyPackage != nil { - s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MessageOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.MessageOptions{") - if this.MessageSetWireFormat != nil { - s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") - } - if this.NoStandardDescriptorAccessor != nil { - s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.MapEntry != nil { - s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.FieldOptions{") - if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") - } - if this.Packed != nil { - s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") - } - if this.Jstype != nil { - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") - } - if this.Lazy != nil { - s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.Weak != nil { - s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.OneofOptions{") - if this.UninterpretedOption != nil { - s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumOptions{") - if this.AllowAlias != nil { - s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumValueOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.ServiceOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MethodOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.MethodOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.IdempotencyLevel != nil { - s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.UninterpretedOption{") - if this.Name 
!= nil { - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - } - if this.IdentifierValue != nil { - s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") - } - if this.PositiveIntValue != nil { - s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") - } - if this.NegativeIntValue != nil { - s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") - } - if this.DoubleValue != nil { - s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") - } - if this.StringValue != nil { - s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") - } - if this.AggregateValue != nil { - s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption_NamePart) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.UninterpretedOption_NamePart{") - if this.NamePart != nil { - s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") - } - if this.IsExtension != nil { - s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.SourceCodeInfo{") - if this.Location != nil { - s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo_Location) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.SourceCodeInfo_Location{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.Span != nil { - s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") - } - if this.LeadingComments != nil { - s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") - } - if this.TrailingComments != nil { - s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") - } - if this.LeadingDetachedComments != nil { - s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GeneratedCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.GeneratedCodeInfo{") - if this.Annotation != nil { - s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func 
(this *GeneratedCodeInfo_Annotation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.SourceFile != nil { - s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") - } - if this.Begin != nil { - s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringDescriptor(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { - e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) - if e == nil { - return "nil" - } - s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" - keys := make([]int, 0, len(e)) - for k := range e { - keys = append(keys, int(k)) - } - sort.Ints(keys) - ss := []string{} - for _, k := range keys { - ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) - } - s += strings.Join(ss, ",") + "})" - return s -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go deleted file mode 100644 index e0846a35..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ /dev/null @@ -1,390 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package descriptor - -import ( - "strings" -) - -func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { - if !msg.GetOptions().GetMapEntry() { - return nil, nil - } - return msg.GetField()[0], msg.GetField()[1] -} - -func dotToUnderscore(r rune) rune { - if r == '.' { - return '_' - } - return r -} - -func (field *FieldDescriptorProto) WireType() (wire int) { - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE: - return 1 - case FieldDescriptorProto_TYPE_FLOAT: - return 5 - case FieldDescriptorProto_TYPE_INT64: - return 0 - case FieldDescriptorProto_TYPE_UINT64: - return 0 - case FieldDescriptorProto_TYPE_INT32: - return 0 - case FieldDescriptorProto_TYPE_UINT32: - return 0 - case FieldDescriptorProto_TYPE_FIXED64: - return 1 - case FieldDescriptorProto_TYPE_FIXED32: - return 5 - case FieldDescriptorProto_TYPE_BOOL: - return 0 - case FieldDescriptorProto_TYPE_STRING: - return 2 - case FieldDescriptorProto_TYPE_GROUP: - return 2 - case FieldDescriptorProto_TYPE_MESSAGE: - return 2 - case FieldDescriptorProto_TYPE_BYTES: - return 2 - case FieldDescriptorProto_TYPE_ENUM: - return 0 - case FieldDescriptorProto_TYPE_SFIXED32: - return 5 - case FieldDescriptorProto_TYPE_SFIXED64: - return 1 - case FieldDescriptorProto_TYPE_SINT32: - return 0 - case FieldDescriptorProto_TYPE_SINT64: - return 0 - } - panic("unreachable") -} - -func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { - packed := field.IsPacked() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { - packed := field.IsPacked3() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey() []byte { - x := field.GetKeyUint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (field *FieldDescriptorProto) GetKey3() []byte { - x := field.GetKey3Uint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { - msg := desc.GetMessage(packageName, messageName) - if msg == nil { - return nil - } - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) - if nes != nil { - return nes - } - } - return nil -} - -func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) - if res != nil { - return res - } - } - return nil -} - -func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { - for _, file := range 
desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - if msg.GetName()+"."+nes.GetName() == typeName { - return nes - } - } - } - } - return nil -} - -func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - if msg.GetName()+"."+nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - } - } - return false -} - -func (msg *DescriptorProto) IsExtendable() bool { - return len(msg.GetExtensionRange()) > 0 -} - -func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." + typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetName() == fieldName { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." 
+ typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetNumber() == fieldNum { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", "" - } - field := parent.GetFieldDescriptor(fieldName) - if field == nil { - var extPackageName string - extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) - if field == nil { - return "", "" - } - packageName = extPackageName - } - typeNames := strings.Split(field.GetTypeName(), ".") - if len(typeNames) == 1 { - msg := desc.GetMessage(packageName, typeName) - if msg == nil { - return "", "" - } - return packageName, msg.GetName() - } - if len(typeNames) > 2 { - for i := 1; i < len(typeNames)-1; i++ { - packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") - typeName = strings.Join(typeNames[len(typeNames)-i:], ".") - msg := desc.GetMessage(packageName, typeName) - if msg != nil { - typeNames := strings.Split(msg.GetName(), ".") - if len(typeNames) == 1 { - return packageName, msg.GetName() - } - return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] - } - } - } - return "", "" -} - -func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, enum := range file.GetEnumType() { - if enum.GetName() == typeName { - return enum - } - } - } - return nil -} - -func (f *FieldDescriptorProto) IsEnum() bool { - return *f.Type == FieldDescriptorProto_TYPE_ENUM -} - -func (f *FieldDescriptorProto) IsMessage() bool { - return *f.Type == FieldDescriptorProto_TYPE_MESSAGE -} - -func (f *FieldDescriptorProto) IsBytes() bool { - return *f.Type == FieldDescriptorProto_TYPE_BYTES -} - -func (f *FieldDescriptorProto) IsRepeated() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED -} - -func (f *FieldDescriptorProto) IsString() bool { - return *f.Type == FieldDescriptorProto_TYPE_STRING -} - -func (f *FieldDescriptorProto) IsBool() bool { - return *f.Type == FieldDescriptorProto_TYPE_BOOL -} - -func (f *FieldDescriptorProto) IsRequired() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED -} - -func (f *FieldDescriptorProto) IsPacked() bool { - return f.Options != nil && f.GetOptions().GetPacked() -} - -func (f *FieldDescriptorProto) IsPacked3() bool { - if f.IsRepeated() && f.IsScalar() { - if f.Options == nil || f.GetOptions().Packed == nil { - return true - } - return f.Options != nil && f.GetOptions().GetPacked() - } - return false -} - -func (m *DescriptorProto) HasExtension() bool { - return len(m.ExtensionRange) > 0 -} diff --git a/vendor/github.com/gojuno/minimock/v3/.goreleaser.yml 
b/vendor/github.com/gojuno/minimock/v3/.goreleaser.yml deleted file mode 100644 index 80ec6347..00000000 --- a/vendor/github.com/gojuno/minimock/v3/.goreleaser.yml +++ /dev/null @@ -1,34 +0,0 @@ -project_name: minimock -before: - hooks: - - make all -builds: -- - main: ./cmd/minimock/ - binary: minimock - env: - - CGO_ENABLED=0 - goos: - - darwin - - linux - - windows - - ldflags: - - -X main.version={{.Version}} -X main.commit={{.FullCommit}} -X main.buildDate={{.Date}} -archive: - replacements: - darwin: Darwin - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}" -changelog: - sort: asc - filters: - exclude: - - '^docs:' - - '^test:' diff --git a/vendor/github.com/gojuno/minimock/v3/.travis.yml b/vendor/github.com/gojuno/minimock/v3/.travis.yml deleted file mode 100644 index 4d92cdf3..00000000 --- a/vendor/github.com/gojuno/minimock/v3/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -go: - - 1.x - -install: - - go get github.com/mattn/goveralls - - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - - export BINDIR=./bin - - curl -L https://git.io/vp6lP | sh #gometalinter installation - - export PATH=`pwd`/bin:$PATH - - export GO111MODULE=on - -script: - - cd $TRAVIS_BUILD_DIR && make all - - cd tests/ - - go test -c -covermode=count -coverpkg=github.com/gojuno/minimock/tests - - ./tests.test -test.coverprofile coverage.cov - -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.cov diff --git a/vendor/github.com/gojuno/minimock/v3/LICENSE b/vendor/github.com/gojuno/minimock/v3/LICENSE deleted file mode 100644 index 148e6fa4..00000000 --- a/vendor/github.com/gojuno/minimock/v3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Juno Lab Ltd. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
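The gogo/protobuf descriptor helper.go removed a few hunks above derives a field's wire key from its number and wire type: the key value is (field number << 3) | wire type, emitted as a base-128 varint (GetKeyUint64/GetKey). The following standalone sketch reproduces only that computation outside the generated descriptor types; the fieldKey name and signature are illustrative and are not part of the gogo/protobuf API.

```go
package main

import "fmt"

// fieldKey mirrors the logic of GetKeyUint64/GetKey in the removed helper.go:
// the protobuf wire key is (field number << 3) | wire type, encoded as a
// base-128 varint. Field number and wire type are plain arguments here rather
// than descriptor lookups.
func fieldKey(fieldNumber int32, wireType uint32) []byte {
	x := uint64(uint32(fieldNumber)<<3 | wireType)
	key := make([]byte, 0, 5)
	for x > 127 {
		// Emit the low seven bits with the continuation bit set.
		key = append(key, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	key = append(key, uint8(x))
	return key
}

func main() {
	// Field 1 with wire type 2 (length-delimited) fits in one byte: 0x0a.
	fmt.Printf("% x\n", fieldKey(1, 2))
	// Field 16 with wire type 0 crosses the one-byte varint boundary: 0x80 0x01.
	fmt.Printf("% x\n", fieldKey(16, 0))
}
```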
diff --git a/vendor/github.com/gojuno/minimock/v3/Makefile b/vendor/github.com/gojuno/minimock/v3/Makefile deleted file mode 100644 index 89e799c9..00000000 --- a/vendor/github.com/gojuno/minimock/v3/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -all: install test lint clean - -generate: - go run ./cmd/minimock/minimock.go -i github.com/gojuno/minimock/v3.Tester -o ./tests - go run ./cmd/minimock/minimock.go -i ./tests.Formatter -o ./tests/formatter_mock.go - -lint: - gometalinter ./tests/ -I minimock -e gopathwalk --disable=gotype --deadline=2m - -install: - go mod download - go install ./cmd/minimock - -clean: - [ -e ./tests/formatter_mock.go.test_origin ] && mv -f ./tests/formatter_mock.go.test_origin ./tests/formatter_mock.go - [ -e ./tests/tester_mock_test.go.test_origin ] && mv -f ./tests/tester_mock_test.go.test_origin ./tests/tester_mock_test.go - rm -Rf bin/ dist/ - -test_save_origin: - [ -e ./tests/formatter_mock.go.test_origin ] || cp ./tests/formatter_mock.go ./tests/formatter_mock.go.test_origin - [ -e ./tests/tester_mock_test.go.test_origin ] || cp ./tests/tester_mock_test.go ./tests/tester_mock_test.go.test_origin - -test: test_save_origin generate - diff ./tests/formatter_mock.go ./tests/formatter_mock.go.test_origin - diff ./tests/tester_mock_test.go ./tests/tester_mock_test.go.test_origin - go test -race ./... diff --git a/vendor/github.com/gojuno/minimock/v3/README.md b/vendor/github.com/gojuno/minimock/v3/README.md deleted file mode 100644 index dd79f1eb..00000000 --- a/vendor/github.com/gojuno/minimock/v3/README.md +++ /dev/null @@ -1,198 +0,0 @@ -![logo](https://rawgit.com/gojuno/minimock/master/logo.svg) -[![GoDoc](https://godoc.org/github.com/gojuno/minimock?status.svg)](http://godoc.org/github.com/gojuno/minimock) -[![Build Status](https://travis-ci.org/gojuno/minimock.svg?branch=master)](https://travis-ci.org/gojuno/minimock) -[![Go Report Card](https://goreportcard.com/badge/github.com/gojuno/minimock)](https://goreportcard.com/report/github.com/gojuno/minimock) -[![Coverage Status](https://coveralls.io/repos/github/gojuno/minimock/badge.svg?branch=master)](https://coveralls.io/github/gojuno/minimock?branch=master) -[![Release](https://img.shields.io/github/release/gojuno/minimock.svg)](https://github.com/gojuno/minimock/releases/latest) -[![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/avelino/awesome-go#testing) - - -## Summary -Minimock generates mocks out of Go interface declarations. - -The main features of minimock are: - -* It generates statically typed mocks and helpers. There's no need for type assertions when you use minimock. -* It's fully integrated with the standard Go "testing" package. -* It's ready for Go modules. -* It works well with [table driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) because you can set up mocks for several methods in one line of code using the builder pattern. -* It can generate several mocks in one run. -* It generates code that passes [gometalinter](https://github.com/alecthomas/gometalinter) checks. -* It puts //go:generate instruction into the generated code, so all you need to do when the source interface is updated is to run the `go generate ./...` command from within the project's directory. -* It provides Finish and Wait helpers to check if all mocked methods have been called during the test and keeps your test code clean and up to date. 
-* It provides When and Then helpers to set up several expectations and results for any method. -* It generates concurrent-safe mocks and mock invocation counters that you can use to manage mock behavior depending on the number of calls. -* It can be used with the [GoUnit](https://github.com/hexdigest/gounit) tool which generates table-driven tests that make use of minimock. - -## Installation - -If you use go modules please download the [latest binary](https://github.com/gojuno/minimock/releases/latest) -or install minimock from source: -``` -go install github.com/gojuno/minimock/v3/cmd/minimock -``` - -If you don't use go modules please find the latest v2.x binary [here](https://github.com/gojuno/minimock/releases) -or install minimock using [v2 branch](https://github.com/gojuno/minimock/tree/v2) - -## Usage - -``` - minimock [-i source.interface] [-o output/dir/or/file.go] [-g] - -g don't put go:generate instruction into the generated code - -h show this help message - -i string - comma-separated names of the interfaces to mock, i.e fmt.Stringer,io.Reader - use io.* notation to generate mocks for all interfaces in the "io" package (default "*") - -o string - comma-separated destination file names or packages to put the generated mocks in, - by default the generated mock is placed in the source package directory - -s string - mock file suffix (default "_mock_test.go") -``` - -Let's say we have the following interface declaration in github.com/gojuno/minimock/tests package: -```go -type Formatter interface { - Format(string, ...interface{}) string -} -``` - -This will generate mocks for all interfaces defined in the "tests" package: - -``` -$ cd ~/go/src/github.com/gojuno/minimock/tests -$ minimock -``` - -Here is how to generate a mock for the "Formatter" interface only: - -``` -$ cd ~/go/src/github.com/gojuno/minimock/tests -$ minimock -i Formatter -``` - -Same using the relative package notation: - -``` -$ minimock -i ./tests.Formatter -``` - -Same using the full import path of the source package: - -``` -$ minimock -i github.com/gojuno/minimock/tests.Formatter -o ./tests/ -``` - -All the examples above generate ./tests/formatter_mock_test.go file - - -Now it's time to use the generated mock. There are several ways it can be done. - -### Setting up a mock using the builder pattern and Expect/Return methods: -```go -mc := minimock.NewController(t) -formatterMock := NewFormatterMock(mc).FormatMock.Expect("hello %s!", "world").Return("hello world!") -``` - -The builder pattern is convenient when you have more than one method to mock. -Let's say we have an io.ReadCloser interface which has two methods: Read and Close -```go -type ReadCloser interface { - Read(p []byte) (n int, err error) - Close() error -} -``` - -We can set up a mock using a simple one-liner: -```go -mc := minimock.NewController(t) -readCloserMock := NewReadCloserMock(mc).ReadMock.Expect([]byte(1,2,3)).Return(3, nil).CloseMock.Return(nil) -``` - -But what if we don't want to check all arguments of the read method? -Let's say we just want to check that the second element of the given slice "p" is 2. 
-This is where "Inspect" helper comes into play: -```go -mc := minimock.NewController(t) -readCloserMock := NewReadCloserMock(mc).ReadMock.Inspect(func(p []byte){ - assert.Equal(mc, 2, p[1]) -}).Return(3, nil).CloseMock.Return(nil) - -``` - -### Setting up a mock using When/Then helpers: -```go -mc := minimock.NewController(t) -formatterMock := NewFormatterMock(mc) -formatterMock.When("Hello %s!", "world").Then("Hello world!") -formatterMock.When("Hi %s!", "there").Then("Hi there!") -``` - -alternatively you can use the one-liner: - -```go -formatterMock = NewFormatterMock(mc).When("Hello %s!", "world").Then("Hello world!").When("Hi %s!", "there").Then("Hi there!") -``` - -### Setting up a mock using the Set method: -```go -mc := minimock.NewController(t) -formatterMock := NewFormatterMock(mc).FormatMock.Set(func(string, ...interface{}) string { - return "minimock" -}) -``` - -You can also use invocation counters in your mocks and tests: -```go -mc := minimock.NewController(t) -formatterMock := NewFormatterMock(mc) -formatterMock.FormatFunc = func(string, ...interface{}) string { - return fmt.Sprintf("minimock: %d", formatterMock.BeforeFormatCounter()) -} -``` - -### Make sure that your mocks are being used -Often we write tons of mocks to test our code but sometimes the tested code stops using mocked dependencies. -You can easily identify this problem by using mc.Finish or mc.Wait helpers. -These helpers ensure that all your mocks and expectations have been used at least once during the test run. - -```go -func TestSomething(t *testing.T) { - mc := minimock.NewController(t) - defer mc.Finish() //it will mark this example test as failed because there are no calls to formatterMock.Format() and readCloserMock.Read() below - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatMock.Return("minimock") - - readCloserMock := NewReadCloserMock(mc) - readCloserMock.ReadMock.Return(5, nil) -} -``` - -### Testing concurrent code -Testing concurrent code is tough. Fortunately minimock.Controller provides you with the helper method that makes testing concurrent code easy. -Here is how it works: - -```go -func TestSomething(t *testing.T) { - mc := minimock.NewController(t) - - //Wait ensures that all mocked methods have been called within the given time span - //if any of the mocked methods have not been called Wait marks the test as failed - defer mc.Wait(time.Second) - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatMock.Return("minimock") - - //tested code can run the mocked method in a goroutine - go formatterMock.Format("hello world!") -} -``` - -## Using GoUnit with minimock - -Writing test is not only mocking the dependencies. Often the test itself contains a lot of boilerplate code. -You can generate test stubs using [GoUnit](https://github.com/hexdigest/gounit) tool which has a nice template that uses minimock. - -Happy mocking! diff --git a/vendor/github.com/gojuno/minimock/v3/camel_to_snake.go b/vendor/github.com/gojuno/minimock/v3/camel_to_snake.go deleted file mode 100644 index 2f903e69..00000000 --- a/vendor/github.com/gojuno/minimock/v3/camel_to_snake.go +++ /dev/null @@ -1,62 +0,0 @@ -package minimock - -import ( - "unicode" - "unicode/utf8" -) - -type buffer struct { - r []byte - runeBytes [utf8.UTFMax]byte -} - -func (b *buffer) write(r rune) { - if r < utf8.RuneSelf { - b.r = append(b.r, byte(r)) - return - } - n := utf8.EncodeRune(b.runeBytes[0:], r) - b.r = append(b.r, b.runeBytes[0:n]...) 
-} - -func (b *buffer) indent() { - if len(b.r) > 0 { - b.r = append(b.r, '_') - } -} - -// CamelToSnake transforms strings from CamelCase to snake_case -func CamelToSnake(s string) string { - b := buffer{ - r: make([]byte, 0, len(s)), - } - var m rune - var w bool - for _, ch := range s { - if unicode.IsUpper(ch) { - if m != 0 { - if !w { - b.indent() - w = true - } - b.write(m) - } - m = unicode.ToLower(ch) - } else { - if m != 0 { - b.indent() - b.write(m) - m = 0 - w = false - } - b.write(ch) - } - } - if m != 0 { - if !w { - b.indent() - } - b.write(m) - } - return string(b.r) -} diff --git a/vendor/github.com/gojuno/minimock/v3/doc.go b/vendor/github.com/gojuno/minimock/v3/doc.go deleted file mode 100644 index 09b216c0..00000000 --- a/vendor/github.com/gojuno/minimock/v3/doc.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Package minimock is a command line tool that parses the input Go source file that contains an interface declaration and generates -implementation of this interface that can be used as a mock. - -Main features of minimock - -1. It's integrated with the standard Go "testing" package - -2. It supports variadic methods and embedded interfaces - -3. It's very convenient to use generated mocks in table tests because it implements builder pattern to set up several mocks - -4. It provides a useful Wait(time.Duration) helper to test concurrent code - -5. It generates helpers to check if the mocked methods have been called and keeps your tests clean and up to date - -6. It generates concurrent-safe mock execution counters that you can use in your mocks to implement sophisticated mocks behaviour - -Let's say we have the following interface declaration in github.com/gojuno/minimock/tests package: - - type Formatter interface { - Format(string, ...interface{}) string - } - -Here is how to generate the mock for this interface: - - minimock -i github.com/gojuno/minimock/tests.Formatter -o ./tests/ - -The result file ./tests/formatter_mock_test.go will contain the following code: - - //FormatterMock implements github.com/gojuno/minimock/tests.Formatter - type FormatterMock struct { - t minimock.Tester - - FormatFunc func(p string, p1 ...interface{}) (r string) - FormatCounter uint64 - FormatMock mFormatterMockFormat - } - - //NewFormatterMock returns a mock for github.com/gojuno/minimock/tests.Formatter - func NewFormatterMock(t minimock.Tester) *FormatterMock { - m := &FormatterMock{t: t} - - if controller, ok := t.(minimock.MockController); ok { - controller.RegisterMocker(m) - } - - m.FormatMock = mFormatterMockFormat{mock: m} - - return m - } - - type mFormatterMockFormat struct { - mock *FormatterMock - mockExpectations *FormatterMockFormatParams - } - - //FormatterMockFormatParams represents input parameters of the Formatter.Format - type FormatterMockFormatParams struct { - p string - p1 []interface{} - } - - //Expect sets up expected params for the Formatter.Format - func (m *mFormatterMockFormat) Expect(p string, p1 ...interface{}) *mFormatterMockFormat { - m.mockExpectations = &FormatterMockFormatParams{p, p1} - return m - } - - //Return sets up a mock for Formatter.Format to return Return's arguments - func (m *mFormatterMockFormat) Return(r string) *FormatterMock { - m.mock.FormatFunc = func(p string, p1 ...interface{}) string { - return r - } - return m.mock - } - - //Set uses given function f as a mock of Formatter.Format method - func (m *mFormatterMockFormat) Set(f func(p string, p1 ...interface{}) (r string)) *FormatterMock { - m.mock.FormatFunc = f - return m.mock - } - - 
//Format implements github.com/gojuno/minimock/tests.Formatter interface - func (m *FormatterMock) Format(p string, p1 ...interface{}) (r string) { - defer atomic.AddUint64(&m.FormatCounter, 1) - - if m.FormatMock.mockExpectations != nil { - testify_assert.Equal(m.t, *m.FormatMock.mockExpectations, FormatterMockFormatParams{p, p1}, - "Formatter.Format got unexpected parameters") - - if m.FormatFunc == nil { - m.t.Fatal("No results are set for the FormatterMock.Format") - return - } - } - - if m.FormatFunc == nil { - m.t.Fatal("Unexpected call to FormatterMock.Format") - return - } - - return m.FormatFunc(p, p1...) - } - - //FormatMinimockCounter returns a count of Formatter.Format invocations - func (m *FormatterMock) FormatMinimockCounter() uint64 { - return atomic.LoadUint64(&m.FormatCounter) - } - - //MinimockFinish checks that all mocked methods of the interface have been called at least once - func (m *FormatterMock) MinimockFinish() { - if m.FormatFunc != nil && atomic.LoadUint64(&m.FormatCounter) == 0 { - m.t.Fatal("Expected call to FormatterMock.Format") - } - } - - //MinimockWait waits for all mocked methods to be called at least once - //this method is called by minimock.Controller - func (m *FormatterMock) MinimockWait(timeout time.Duration) { - timeoutCh := time.After(timeout) - for { - ok := true - ok = ok && (m.FormatFunc == nil || atomic.LoadUint64(&m.FormatCounter) > 0) - - if ok { - return - } - - select { - case <-timeoutCh: - - if m.FormatFunc != nil && atomic.LoadUint64(&m.FormatCounter) == 0 { - m.t.Error("Expected call to FormatterMock.Format") - } - - m.t.Fatalf("Some mocks were not called on time: %s", timeout) - return - default: - time.Sleep(time.Millisecond) - } - } - } - -There are several ways to set up a mock - -Setting up a mock using direct assignment: - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatFunc = func(string, ...interface{}) string { - return "minimock" - } - -Setting up a mock using builder pattern and Return method: - - formatterMock := NewFormatterMock(mc).FormatMock.Expect("%s %d", "string", 1).Return("minimock") - -Setting up a mock using builder and Set method: - - formatterMock := NewFormatterMock(mc).FormatMock.Set(func(string, ...interface{}) string { - return "minimock" - }) - -Builder pattern is convenient when you have to mock more than one method of an interface. -Let's say we have an io.ReadCloser interface which has two methods: Read and Close - - type ReadCloser interface { - Read(p []byte) (n int, err error) - Close() error - } - -Then you can set up a mock using just one assignment: - - readCloserMock := NewReadCloserMock(mc).ReadMock.Expect([]byte(1,2,3)).Return(3, nil).CloseMock.Return(nil) - -You can also use invocation counters in your mocks and tests: - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatFunc = func(string, ...interface{}) string { - return fmt.Sprintf("minimock: %d", formatterMock.FormatMinimockCounter()) - } - -minimock.Controller - -When you have to mock multiple dependencies in your test it's recommended to use minimock.Controller and its Finish or Wait methods. 
-All you have to do is instantiate the Controller and pass it as an argument to the mocks' constructors: - - func TestSomething(t *testing.T) { - mc := minimock.NewController(t) - defer mc.Finish() - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatMock.Return("minimock") - - readCloserMock := NewReadCloserMock(mc) - readCloserMock.ReadMock.Return(5, nil) - - readCloserMock.Read([]byte{}) - formatterMock.Format() - } - -Every mock is registered in the controller so by calling mc.Finish() you can verify that all the registered mocks have been called -within your test. - -Keep your tests clean - -Sometimes we write tons of mocks for our tests but over time the tested code stops using mocked dependencies, -however mocks are still present and being initialized in the test files. So while tested code can shrink, tests are only growing. -To prevent this minimock provides Finish() method that verifies that all your mocks have been called at least once during the test run. - - func TestSomething(t *testing.T) { - mc := minimock.NewController(t) - defer mc.Finish() - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatMock.Return("minimock") - - readCloserMock := NewReadCloserMock(mc) - readCloserMock.ReadMock.Return(5, nil) - - //this test will fail because there are no calls to formatterMock.Format() and readCloserMock.Read() - } - -Testing concurrent code - -Testing concurrent code is tough. Fortunately minimock provides you with the helper method that makes testing concurrent code easy. -Here is how it works: - - func TestSomething(t *testing.T) { - mc := minimock.NewController(t) - - //Wait ensures that all mocked methods have been called within given interval - //if any of the mocked methods have not been called Wait marks test as failed - defer mc.Wait(time.Second) - - formatterMock := NewFormatterMock(mc) - formatterMock.FormatMock.Return("minimock") - - //tested code can run mocked method in a goroutine - go formatterMock.Format("") - } - -Minimock comman line args: - - $ minimock -h - Usage of minimock: - -f string - DEPRECATED: input file or import path of the package that contains interface declaration - -h show this help message - -i string - comma-separated names of the interfaces to mock, i.e fmt.Stringer,io.Reader, use io.* notation to generate mocks for all interfaces in an io package - -o string - destination file name to place the generated mock or path to destination package when multiple interfaces are given - -p string - DEPRECATED: destination package name - -s string - output file name suffix which is added to file names when multiple interfaces are given (default "_mock_test.go") - -t string - DEPRECATED: mock struct name (default Mock) - -withTests - parse *_test.go files in the source package - -*/ -package minimock diff --git a/vendor/github.com/gojuno/minimock/v3/equal.go b/vendor/github.com/gojuno/minimock/v3/equal.go deleted file mode 100644 index 2c43b4c9..00000000 --- a/vendor/github.com/gojuno/minimock/v3/equal.go +++ /dev/null @@ -1,63 +0,0 @@ -package minimock - -import ( - "reflect" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -var dumpConf = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - SortKeys: true, -} - -// Equal returns true if a equals b -func Equal(a, b interface{}) bool { - if a == nil && b == nil { - return a == b - } - - return reflect.DeepEqual(a, b) -} - -// Diff returns unified diff of the textual representations of e and a -func Diff(e, a interface{}) string { - 
if e == nil || a == nil { - return "" - } - - t := reflect.TypeOf(e) - k := t.Kind() - - if reflect.TypeOf(a) != t { - return "" - } - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - - if k != reflect.Array && k != reflect.Map && k != reflect.Slice && k != reflect.Struct { - return "" - } - - es := dumpConf.Sdump(e) - as := dumpConf.Sdump(a) - - diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(es), - B: difflib.SplitLines(as), - Context: 1, - FromFile: "Expected params", - ToFile: "Actual params", - }) - - if err != nil { - panic(err) - } - - return "\n\nDiff:\n" + diff -} diff --git a/vendor/github.com/gojuno/minimock/v3/logo.svg b/vendor/github.com/gojuno/minimock/v3/logo.svg deleted file mode 100644 index b1a0bd82..00000000 --- a/vendor/github.com/gojuno/minimock/v3/logo.svg +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - - - - image/svg+xml - - - - - - - - - - minimock - - - - diff --git a/vendor/github.com/gojuno/minimock/v3/mock_controller.go b/vendor/github.com/gojuno/minimock/v3/mock_controller.go deleted file mode 100644 index e432a256..00000000 --- a/vendor/github.com/gojuno/minimock/v3/mock_controller.go +++ /dev/null @@ -1,77 +0,0 @@ -package minimock - -import ( - "sync" - "time" -) - -//Mocker describes common interface for all mocks generated by minimock -type Mocker interface { - MinimockFinish() - MinimockWait(time.Duration) -} - -// Tester contains subset of the testing.T methods used by the generated code -type Tester interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Error(...interface{}) - Errorf(format string, args ...interface{}) - FailNow() -} - -//MockController can be passed to mocks generated by minimock -type MockController interface { - Tester - - RegisterMocker(Mocker) -} - -//Controller implements MockController interface and has to be used in your tests: -//mockController := minimock.NewController(t) -//defer mockController.Finish() -//stringerMock := NewStringerMock(mockController) -type Controller struct { - Tester - sync.Mutex - - mockers []Mocker -} - -// Check if Controller supports MockController interface -var _ MockController = &Controller{} - -//NewController returns an instance of Controller -func NewController(t Tester) *Controller { - return &Controller{Tester: newSafeTester(t)} -} - -//RegisterMocker puts mocker to the list of controller mockers -func (c *Controller) RegisterMocker(m Mocker) { - c.Lock() - c.mockers = append(c.mockers, m) - c.Unlock() -} - -//Finish calls to MinimockFinish method for all registered mockers -func (c *Controller) Finish() { - c.Lock() - for _, m := range c.mockers { - m.MinimockFinish() - } - c.Unlock() -} - -//Wait calls to MinimockWait method for all registered mockers -func (c *Controller) Wait(d time.Duration) { - wg := sync.WaitGroup{} - wg.Add(len(c.mockers)) - for _, m := range c.mockers { - go func(m Mocker) { - defer wg.Done() - m.MinimockWait(d) - }(m) - } - - wg.Wait() -} diff --git a/vendor/github.com/gojuno/minimock/v3/safe_tester.go b/vendor/github.com/gojuno/minimock/v3/safe_tester.go deleted file mode 100644 index 91ccdeef..00000000 --- a/vendor/github.com/gojuno/minimock/v3/safe_tester.go +++ /dev/null @@ -1,52 +0,0 @@ -package minimock - -import "sync" - -type safeTester struct { - Tester - m sync.Mutex -} - -func newSafeTester(t Tester) *safeTester { - return &safeTester{Tester: t} -} - -// Error implements Tester -func (st *safeTester) Error(args ...interface{}) { - st.m.Lock() - defer st.m.Unlock() - - 
st.Tester.Error(args...) -} - -// Errorf implements Tester -func (st *safeTester) Errorf(format string, args ...interface{}) { - st.m.Lock() - defer st.m.Unlock() - - st.Tester.Errorf(format, args...) -} - -// Fatal implements Tester -func (st *safeTester) Fatal(args ...interface{}) { - st.m.Lock() - defer st.m.Unlock() - - st.Tester.Fatal(args...) -} - -// Fatalf implements Tester -func (st *safeTester) Fatalf(format string, args ...interface{}) { - st.m.Lock() - defer st.m.Unlock() - - st.Tester.Fatalf(format, args...) -} - -// FailNow implements Tester -func (st *safeTester) FailNow() { - st.m.Lock() - defer st.m.Unlock() - - st.Tester.FailNow() -} diff --git a/vendor/github.com/gojuno/minimock/v3/template.go b/vendor/github.com/gojuno/minimock/v3/template.go deleted file mode 100644 index 3017414c..00000000 --- a/vendor/github.com/gojuno/minimock/v3/template.go +++ /dev/null @@ -1,325 +0,0 @@ -package minimock - -const ( - // HeaderTemplate is used to generate package clause and go:generate instruction - HeaderTemplate = ` - package {{$.Package.Name}} - - // Code generated by http://github.com/gojuno/minimock ({{$.Options.HeaderVars.Version}}). DO NOT EDIT. - - {{if $.Options.HeaderVars.GenerateInstruction}} - //go:generate minimock -i {{$.SourcePackage.PkgPath}}.{{$.Options.InterfaceName}} -o {{$.Options.OutputFile}} - {{end}} - - import ( - {{range $import := $.Options.Imports}}{{- if not (in $import "\"time\"" "\"sync/atomic\"" "\"github.com/gojuno/minimock/v3\"")}} - {{$import}}{{end}}{{end}} - {{$.Options.SourcePackageAlias}} "{{$.SourcePackage.PkgPath}}" - mm_atomic "sync/atomic" - mm_time "time" - "github.com/gojuno/minimock/v3" - ) - ` - - // BodyTemplate is used to generate mock body - BodyTemplate = ` - {{ $mock := (title (printf "%sMock" $.Interface.Name)) }} - - // {{$mock}} implements {{$.Interface.Type}} - type {{$mock}} struct { - t minimock.Tester - {{ range $method := $.Interface.Methods }} - func{{$method.Name}} func{{ $method.Signature }} - inspectFunc{{$method.Name}} func({{ $method.Params}}) - after{{$method.Name}}Counter uint64 - before{{$method.Name}}Counter uint64 - {{$method.Name}}Mock m{{$mock}}{{$method.Name}} - {{ end }} - } - - // New{{$mock}} returns a mock for {{$.Interface.Type}} - func New{{$mock}}(t minimock.Tester) *{{$mock}} { - m := &{{$mock}}{t: t} - if controller, ok := t.(minimock.MockController); ok { - controller.RegisterMocker(m) - } - {{ range $method := $.Interface.Methods }} - m.{{$method.Name}}Mock = m{{$mock}}{{$method.Name}}{mock: m} - {{ if $method.HasParams }} m.{{$method.Name}}Mock.callArgs = []*{{$mock}}{{$method.Name}}Params{} {{ end }} - {{ end }} - return m - } - - {{ range $method := $.Interface.Methods }} - {{ $m := (printf "mm%s" $method.Name) }} - - type m{{$mock}}{{$method.Name}} struct { - mock *{{$mock}} - defaultExpectation *{{$mock}}{{$method.Name}}Expectation - expectations []*{{$mock}}{{$method.Name}}Expectation - {{ if $method.HasParams }} - callArgs []*{{$mock}}{{$method.Name}}Params - mutex sync.RWMutex - {{ end }} - } - - // {{$mock}}{{$method.Name}}Expectation specifies expectation struct of the {{$.Interface.Name}}.{{$method.Name}} - type {{$mock}}{{$method.Name}}Expectation struct { - mock *{{$mock}} - {{ if $method.HasParams }} params *{{$mock}}{{$method.Name}}Params {{end}} - {{ if $method.HasResults }} results *{{$mock}}{{$method.Name}}Results {{end}} - Counter uint64 - } - - {{if $method.HasParams }} - // {{$mock}}{{$method.Name}}Params contains parameters of the {{$.Interface.Name}}.{{$method.Name}} - type 
{{$mock}}{{$method.Name}}Params {{$method.ParamsStruct}} - {{end}} - - {{if $method.HasResults }} - // {{$mock}}{{$method.Name}}Results contains results of the {{$.Interface.Name}}.{{$method.Name}} - type {{$mock}}{{$method.Name}}Results {{$method.ResultsStruct}} - {{end}} - - // Expect sets up expected params for {{$.Interface.Name}}.{{$method.Name}} - func ({{$m}} *m{{$mock}}{{$method.Name}}) Expect({{$method.Params}}) *m{{$mock}}{{$method.Name}} { - if {{$m}}.mock.func{{$method.Name}} != nil { - {{$m}}.mock.t.Fatalf("{{$mock}}.{{$method.Name}} mock is already set by Set") - } - - if {{$m}}.defaultExpectation == nil { - {{$m}}.defaultExpectation = &{{$mock}}{{$method.Name}}Expectation{} - } - - {{if $method.HasParams }} - {{$m}}.defaultExpectation.params = &{{$mock}}{{$method.Name}}Params{ {{ $method.ParamsNames }} } - for _, e := range {{$m}}.expectations { - if minimock.Equal(e.params, {{$m}}.defaultExpectation.params) { - {{$m}}.mock.t.Fatalf("Expectation set by When has same params: %#v", *{{$m}}.defaultExpectation.params) - } - } - {{end}} - return {{$m}} - } - - // Inspect accepts an inspector function that has same arguments as the {{$.Interface.Name}}.{{$method.Name}} - func ({{$m}} *m{{$mock}}{{$method.Name}}) Inspect(f func({{$method.Params}})) *m{{$mock}}{{$method.Name}} { - if {{$m}}.mock.inspectFunc{{$method.Name}} != nil { - {{$m}}.mock.t.Fatalf("Inspect function is already set for {{$mock}}.{{$method.Name}}") - } - - {{$m}}.mock.inspectFunc{{$method.Name}} = f - - return {{$m}} - } - - // Return sets up results that will be returned by {{$.Interface.Name}}.{{$method.Name}} - func ({{$m}} *m{{$mock}}{{$method.Name}}) Return({{$method.Results}}) *{{$mock}} { - if {{$m}}.mock.func{{$method.Name}} != nil { - {{$m}}.mock.t.Fatalf("{{$mock}}.{{$method.Name}} mock is already set by Set") - } - - if {{$m}}.defaultExpectation == nil { - {{$m}}.defaultExpectation = &{{$mock}}{{$method.Name}}Expectation{mock: {{$m}}.mock} - } - {{if $method.HasResults }} {{$m}}.defaultExpectation.results = &{{$mock}}{{$method.Name}}Results{ {{ $method.ResultsNames }} } {{end}} - return {{$m}}.mock - } - - //Set uses given function f to mock the {{$.Interface.Name}}.{{$method.Name}} method - func ({{$m}} *m{{$mock}}{{$method.Name}}) Set(f func{{$method.Signature}}) *{{$mock}}{ - if {{$m}}.defaultExpectation != nil { - {{$m}}.mock.t.Fatalf("Default expectation is already set for the {{$.Interface.Name}}.{{$method.Name}} method") - } - - if len({{$m}}.expectations) > 0 { - {{$m}}.mock.t.Fatalf("Some expectations are already set for the {{$.Interface.Name}}.{{$method.Name}} method") - } - - {{$m}}.mock.func{{$method.Name}}= f - return {{$m}}.mock - } - - {{if (and $method.HasParams $method.HasResults)}} - // When sets expectation for the {{$.Interface.Name}}.{{$method.Name}} which will trigger the result defined by the following - // Then helper - func ({{$m}} *m{{$mock}}{{$method.Name}}) When({{$method.Params}}) *{{$mock}}{{$method.Name}}Expectation { - if {{$m}}.mock.func{{$method.Name}} != nil { - {{$m}}.mock.t.Fatalf("{{$mock}}.{{$method.Name}} mock is already set by Set") - } - - expectation := &{{$mock}}{{$method.Name}}Expectation{ - mock: {{$m}}.mock, - params: &{{$mock}}{{$method.Name}}Params{ {{ $method.ParamsNames }} }, - } - {{$m}}.expectations = append({{$m}}.expectations, expectation) - return expectation - } - - // Then sets up {{$.Interface.Name}}.{{$method.Name}} return parameters for the expectation previously defined by the When method - func (e *{{$mock}}{{$method.Name}}Expectation) 
Then({{$method.Results}}) *{{$mock}} { - e.results = &{{$mock}}{{$method.Name}}Results{ {{ $method.ResultsNames }} } - return e.mock - } - {{end}} - - // {{$method.Name}} implements {{$.Interface.Type}} - func ({{$m}} *{{$mock}}) {{$method.Declaration}} { - mm_atomic.AddUint64(&{{$m}}.before{{$method.Name}}Counter, 1) - defer mm_atomic.AddUint64(&{{$m}}.after{{$method.Name}}Counter, 1) - - if {{$m}}.inspectFunc{{$method.Name}} != nil { - {{$m}}.inspectFunc{{$method.Name}}({{$method.Params.Pass}}) - } - - {{if $method.HasParams}} - mm_params := &{{$mock}}{{$method.Name}}Params{ {{$method.ParamsNames}} } - - // Record call args - {{$m}}.{{$method.Name}}Mock.mutex.Lock() - {{$m}}.{{$method.Name}}Mock.callArgs = append({{$m}}.{{$method.Name}}Mock.callArgs, mm_params) - {{$m}}.{{$method.Name}}Mock.mutex.Unlock() - - for _, e := range {{$m}}.{{$method.Name}}Mock.expectations { - if minimock.Equal(e.params, mm_params) { - mm_atomic.AddUint64(&e.Counter, 1) - {{$method.ReturnStruct "e.results" -}} - } - } - {{end}} - - if {{$m}}.{{$method.Name}}Mock.defaultExpectation != nil { - mm_atomic.AddUint64(&{{$m}}.{{$method.Name}}Mock.defaultExpectation.Counter, 1) - {{- if $method.HasParams }} - mm_want := {{$m}}.{{$method.Name}}Mock.defaultExpectation.params - mm_got := {{$mock}}{{$method.Name}}Params{ {{$method.ParamsNames}} } - if mm_want != nil && !minimock.Equal(*mm_want, mm_got) { - {{$m}}.t.Errorf("{{$mock}}.{{$method.Name}} got unexpected parameters, want: %#v, got: %#v%s\n", *mm_want, mm_got, minimock.Diff(*mm_want, mm_got)) - } - {{ end }} - {{if $method.HasResults }} - mm_results := {{$m}}.{{$method.Name}}Mock.defaultExpectation.results - if mm_results == nil { - {{$m}}.t.Fatal("No results are set for the {{$mock}}.{{$method.Name}}") - } - {{$method.ReturnStruct "(*mm_results)" -}} - {{else}} - return - {{ end }} - } - if {{$m}}.func{{$method.Name}} != nil { - {{$method.Pass (printf "%s.func" $m)}} - } - {{$m}}.t.Fatalf("Unexpected call to {{$mock}}.{{$method.Name}}.{{range $method.Params}} %v{{end}}", {{ $method.ParamsNames }} ) - {{if $method.HasResults}}return{{end}} - } - - // {{$method.Name}}AfterCounter returns a count of finished {{$mock}}.{{$method.Name}} invocations - func ({{$m}} *{{$mock}}) {{$method.Name}}AfterCounter() uint64 { - return mm_atomic.LoadUint64(&{{$m}}.after{{$method.Name}}Counter) - } - - // {{$method.Name}}BeforeCounter returns a count of {{$mock}}.{{$method.Name}} invocations - func ({{$m}} *{{$mock}}) {{$method.Name}}BeforeCounter() uint64 { - return mm_atomic.LoadUint64(&{{$m}}.before{{$method.Name}}Counter) - } - - {{ if $method.HasParams }} - // Calls returns a list of arguments used in each call to {{$mock}}.{{$method.Name}}. - // The list is in the same order as the calls were made (i.e. 
recent calls have a higher index) - func ({{$m}} *m{{$mock}}{{$method.Name}}) Calls() []*{{$mock}}{{$method.Name}}Params { - {{$m}}.mutex.RLock() - - argCopy := make([]*{{$mock}}{{$method.Name}}Params, len({{$m}}.callArgs)) - copy(argCopy, {{$m}}.callArgs) - - {{$m}}.mutex.RUnlock() - - return argCopy - } - {{ end }} - - // Minimock{{$method.Name}}Done returns true if the count of the {{$method.Name}} invocations corresponds - // the number of defined expectations - func (m *{{$mock}}) Minimock{{$method.Name}}Done() bool { - for _, e := range m.{{$method.Name}}Mock.expectations { - if mm_atomic.LoadUint64(&e.Counter) < 1 { - return false - } - } - - // if default expectation was set then invocations count should be greater than zero - if m.{{$method.Name}}Mock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.after{{$method.Name}}Counter) < 1 { - return false - } - // if func was set then invocations count should be greater than zero - if m.func{{$method.Name}} != nil && mm_atomic.LoadUint64(&m.after{{$method.Name}}Counter) < 1 { - return false - } - return true - } - - // Minimock{{$method.Name}}Inspect logs each unmet expectation - func (m *{{$mock}}) Minimock{{$method.Name}}Inspect() { - for _, e := range m.{{$method.Name}}Mock.expectations { - if mm_atomic.LoadUint64(&e.Counter) < 1 { - {{- if $method.HasParams}} - m.t.Errorf("Expected call to {{$mock}}.{{$method.Name}} with params: %#v", *e.params) - {{else}} - m.t.Error("Expected call to {{$mock}}.{{$method.Name}}") - {{end -}} - } - } - - // if default expectation was set then invocations count should be greater than zero - if m.{{$method.Name}}Mock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.after{{$method.Name}}Counter) < 1 { - {{- if $method.HasParams}} - if m.{{$method.Name}}Mock.defaultExpectation.params == nil { - m.t.Error("Expected call to {{$mock}}.{{$method.Name}}") - } else { - m.t.Errorf("Expected call to {{$mock}}.{{$method.Name}} with params: %#v", *m.{{$method.Name}}Mock.defaultExpectation.params) - } - {{else}} - m.t.Error("Expected call to {{$mock}}.{{$method.Name}}") - {{end -}} - } - // if func was set then invocations count should be greater than zero - if m.func{{$method.Name}} != nil && mm_atomic.LoadUint64(&m.after{{$method.Name}}Counter) < 1 { - m.t.Error("Expected call to {{$mock}}.{{$method.Name}}") - } - } - {{end}} - - // MinimockFinish checks that all mocked methods have been called the expected number of times - func (m *{{$mock}}) MinimockFinish() { - if !m.minimockDone() { - {{- range $method := $.Interface.Methods }} - m.Minimock{{$method.Name}}Inspect() - {{ end -}} - m.t.FailNow() - } - } - - // MinimockWait waits for all mocked methods to be called the expected number of times - func (m *{{$mock}}) MinimockWait(timeout mm_time.Duration) { - timeoutCh := mm_time.After(timeout) - for { - if m.minimockDone() { - return - } - select { - case <-timeoutCh: - m.MinimockFinish() - return - case <-mm_time.After(10 * mm_time.Millisecond): - } - } - } - - func (m *{{$mock}}) minimockDone() bool { - done := true - return done {{ range $method := $.Interface.Methods }}&& - m.Minimock{{$method.Name}}Done(){{end -}} - } - ` -) diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. 
Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index eac1c766..00000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. 
-func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9..00000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 52ccb5a9..00000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,18 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Amazon.com, Inc -Damian Gryski -Eric Buth -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Klaus Post -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index ea6524dd..00000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,41 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Alex Legg -Damian Gryski -Eric Buth -Jan Mercl <0xjnml@gmail.com> -Jonathan Swinney -Kai Backman -Klaus Post -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879..00000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. 
- -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. -tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65..00000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. 
The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. 
- // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. 
- // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s deleted file mode 100644 index 7a3ead17..00000000 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - R2 scratch -// - R3 scratch -// - R4 length or x -// - R5 offset -// - R6 &src[s] -// - R7 &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. -// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. -TEXT ·decode(SB), NOSPLIT, $56-56 - // Initialize R6, R7 and R8-R13. 
- MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R7 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R6 - MOVD R11, R13 - ADD R12, R13, R13 - -loop: - // for s < len(src) - CMP R13, R6 - BEQ end - - // R4 = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBU (R6), R4 - MOVW R4, R3 - ANDW $3, R3 - MOVW $1, R1 - CMPW R1, R3 - BGE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - MOVW $60, R1 - LSRW $2, R4, R4 - CMPW R4, R1 - BLS tagLit60Plus - - // case x < 60: - // s++ - ADD $1, R6, R6 - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that R4 == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R4 can hold 64 bits, so the increment cannot overflow. - ADD $1, R4, R4 - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R2 = len(dst) - d - // R3 = len(src) - s - MOVD R10, R2 - SUB R7, R2, R2 - MOVD R13, R3 - SUB R6, R3, R3 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMP $16, R4 - BGT callMemmove - CMP $16, R2 - BLT callMemmove - CMP $16, R3 - BLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R6), (R14, R15) - STP (R14, R15), 0(R7) - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMP R2, R4 - BGT errCorrupt - CMP R3, R4 - BGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVD R7, 8(RSP) - MOVD R6, 16(RSP) - MOVD R4, 24(RSP) - MOVD R7, 32(RSP) - MOVD R6, 40(RSP) - MOVD R4, 48(RSP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. 
- MOVD 32(RSP), R7 - MOVD 40(RSP), R6 - MOVD 48(RSP), R4 - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R13 - ADD R12, R13, R13 - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADD R4, R6, R6 - SUB $58, R6, R6 - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // case x == 60: - MOVW $61, R1 - CMPW R1, R4 - BEQ tagLit61 - BGT tagLit62Plus - - // x = uint32(src[s-1]) - MOVBU -1(R6), R4 - B doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R6), R4 - B doLit - -tagLit62Plus: - CMPW $62, R4 - BHI tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R6), R4 - MOVBU -1(R6), R3 - ORR R3<<16, R4 - B doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R6), R4 - B doLit - - // The code above handles literal tags. - // ---------------------------------------- - // The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADD $5, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-5])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R6), R5 - B doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADD $3, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-3])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R6), R5 - B doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - R3 == src[s] & 0x03 - // - R4 == src[s] - CMP $2, R3 - BEQ tagCopy2 - BGT tagCopy4 - - // case tagCopy1: - // s += 2 - ADD $2, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVD R4, R5 - AND $0xe0, R5 - MOVBU -1(R6), R3 - ORR R5<<3, R3, R5 - - // length = 4 + int(src[s-2])>>2&0x7 - MOVD $7, R1 - AND R4>>2, R1, R4 - ADD $4, R4, R4 - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - R4 == length && R4 > 0 - // - R5 == offset - - // if offset <= 0 { etc } - MOVD $0, R1 - CMP R1, R5 - BLE errCorrupt - - // if d < offset { etc } - MOVD R7, R3 - SUB R8, R3, R3 - CMP R5, R3 - BLT errCorrupt - - // if length > len(dst)-d { etc } - MOVD R10, R3 - SUB R7, R3, R3 - CMP R3, R4 - BGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVD R10, R14 - SUB R7, R14, R14 - MOVD R7, R15 - SUB R5, R15, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMP $16, R4 - BGT slowForwardCopy - CMP $8, R5 - BLT slowForwardCopy - CMP $16, R14 - BLT slowForwardCopy - MOVD 0(R15), R2 - MOVD R2, 0(R7) - MOVD 8(R15), R3 - MOVD R3, 8(R7) - ADD R4, R7, R7 - B loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUB $10, R14, R14 - CMP R14, R4 - BGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMP $8, R5 - BGE fixUpSlowForwardCopy - MOVD (R15), R3 - MOVD R3, (R7) - SUB R5, R4, R4 - ADD R5, R7, R7 - ADD R5, R5, R5 - B makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R7 being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R7 to R2 so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVD R7, R2 - ADD R4, R7, R7 - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. 
Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - MOVD $0, R1 - CMP R1, R4 - BLE loop - MOVD (R15), R3 - MOVD R3, (R2) - ADD $8, R15, R15 - ADD $8, R2, R2 - SUB $8, R4, R4 - B finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), R3 - MOVB R3, (R7) - ADD $1, R15, R15 - ADD $1, R7, R7 - SUB $1, R4, R4 - CBNZ R4, verySlowForwardCopy - B loop - - // The code above handles copy tags. - // ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMP R10, R7 - BNE errCorrupt - - // return 0 - MOVD $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVD $1, R2 - MOVD R2, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go deleted file mode 100644 index 7082b349..00000000 --- a/vendor/github.com/golang/snappy/decode_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979f..00000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
-TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. 
The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
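The IMULL $0x1e35a7bd / SHRL CX pairs used above, and again in the prevHash/currHash table updates just below, are the multiplicative hash from the pure-Go encode_other.go that this file is documented to follow; roughly:

    // hash indexes the 16-bit table of candidate match positions;
    // shift is 32 minus the log2 of the table size.
    func hash(u, shift uint32) uint32 {
    	return (u * 0x1e35a7bd) >> shift
    }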
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s deleted file mode 100644 index f8d54adf..00000000 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - R3 len(lit) -// - R4 n -// - R6 return value -// - R8 &dst[i] -// - R10 &lit[0] -// -// The 32 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
-TEXT ·emitLiteral(SB), NOSPLIT, $32-56 - MOVD dst_base+0(FP), R8 - MOVD lit_base+24(FP), R10 - MOVD lit_len+32(FP), R3 - MOVD R3, R6 - MOVW R3, R4 - SUBW $1, R4, R4 - - CMPW $60, R4 - BLT oneByte - CMPW $256, R4 - BLT twoBytes - -threeBytes: - MOVD $0xf4, R2 - MOVB R2, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - ADD $3, R6, R6 - B memmove - -twoBytes: - MOVD $0xf0, R2 - MOVB R2, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - ADD $2, R6, R6 - B memmove - -oneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - ADD $1, R6, R6 - -memmove: - MOVD R6, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - R3 length -// - R7 &dst[0] -// - R8 &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVD dst_base+0(FP), R8 - MOVD R8, R7 - MOVD offset+24(FP), R11 - MOVD length+32(FP), R3 - -loop0: - // for length >= 68 { etc } - CMPW $68, R3 - BLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $64, R3, R3 - B loop0 - -step1: - // if length > 64 { etc } - CMP $64, R3 - BLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $60, R3, R3 - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMP $12, R3 - BGE step3 - CMPW $2048, R11 - BGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $3, R11, R11 - AND $0xe0, R11, R11 - SUB $4, R3, R3 - LSLW $2, R3 - AND $0xff, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUB $1, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - R6 &src[0] -// - R7 &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVD src_base+0(FP), R6 - MOVD src_len+8(FP), R14 - MOVD i+24(FP), R15 - MOVD j+32(FP), R7 - ADD R6, R14, R14 - ADD R6, R15, R15 - ADD R6, R7, R7 - MOVD R14, R13 - SUB $8, R13, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
- CMP R13, R7 - BHI cmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE bsf - ADD $8, R15, R15 - ADD $8, R7, R7 - B cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS extendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE extendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - R3 . . -// - R4 . . -// - R5 64 shift -// - R6 72 &src[0], tableSize -// - R7 80 &src[s] -// - R8 88 &dst[d] -// - R9 96 sLimit -// - R10 . &src[nextEmit] -// - R11 104 prevHash, currHash, nextHash, offset -// - R12 112 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 120 candidate -// - R16 . hash constant, 0x1e35a7bd -// - R17 . &table -// - . 128 table -// -// The second column (64, 72, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. -TEXT ·encodeBlock(SB), 0, $32896-56 - MOVD dst_base+0(FP), R8 - MOVD src_base+24(FP), R7 - MOVD src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVD $24, R5 - MOVD $256, R6 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - MOVD $16384, R2 - CMP R2, R6 - BGE varTable - CMP R14, R6 - BGE varTable - SUB $1, R5, R5 - LSL $1, R6, R6 - B calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each - // iterations writes 64 bytes, so we can do only tableSize/32 writes - // instead of the 2048 writes that would zero-initialize all of table's - // 32768 bytes. This clear could overrun the first tableSize elements, but - // it won't overrun the allocated stack size. - ADD $128, RSP, R17 - MOVD R17, R4 - - // !!! R6 = &src[tableSize] - ADD R6<<1, R17, R6 - -memclr: - STP.P (ZR, ZR), 64(R4) - STP (ZR, ZR), -48(R4) - STP (ZR, ZR), -32(R4) - STP (ZR, ZR), -16(R4) - CMP R4, R6 - BHI memclr - - // !!! R6 = &src[0] - MOVD R7, R6 - - // sLimit := len(src) - inputMargin - MOVD R14, R9 - SUB $15, R9, R9 - - // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVD R5, 64(RSP) - MOVD R6, 72(RSP) - MOVD R9, 96(RSP) - - // nextEmit := 0 - MOVD R6, R10 - - // s := 1 - ADD $1, R7, R7 - - // nextHash := hash(load32(src, s), shift) - MOVW 0(R7), R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - -outer: - // for { etc } - - // skip := 32 - MOVD $32, R12 - - // nextS := s - MOVD R7, R13 - - // candidate := 0 - MOVD $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVD R13, R7 - - // bytesBetweenHashLookups := skip >> 5 - MOVD R12, R14 - LSR $5, R14, R14 - - // nextS = s + bytesBetweenHashLookups - ADD R14, R13, R13 - - // skip += bytesBetweenHashLookups - ADD R14, R12, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVD R13, R3 - SUB R6, R3, R3 - CMP R9, R3 - BHI emitRemainder - - // candidate = int(table[nextHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[nextHash] = uint16(s) - MOVD R7, R3 - SUB R6, R3, R3 - - MOVH R3, 0(R17)(R11<<1) - - // nextHash = hash(load32(src, nextS), shift) - MOVW 0(R13), R11 - MULW R16, R11 - LSRW R5, R11, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVW 0(R7), R3 - MOVW (R6)(R15), R4 - CMPW R4, R3 - BNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVD R7, R3 - SUB R10, R3, R3 - CMP $16, R3 - BLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVW R3, R4 - SUBW $1, R4, R4 - - MOVW $60, R2 - CMPW R2, R4 - BLT inlineEmitLiteralOneByte - MOVW $256, R2 - CMPW R2, R4 - BLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVD $0xf4, R1 - MOVB R1, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVD $0xf0, R1 - MOVB R1, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - ADD $128, RSP, R17 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - B inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB R3, R4 - SUBW $1, R4, R4 - AND $0xff, R4, R4 - LSLW $2, R4, R4 - MOVB R4, (R8) - ADD $1, R8, R8 - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- LDP 0(R10), (R0, R1) - STP (R0, R1), 0(R8) - ADD R3, R8, R8 - -inner1: - // for { etc } - - // base := s - MOVD R7, R12 - - // !!! offset := base - candidate - MOVD R12, R11 - SUB R15, R11, R11 - SUB R6, R11, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVD src_len+32(FP), R14 - ADD R6, R14, R14 - - // !!! R13 = &src[len(src) - 8] - MOVD R14, R13 - SUB $8, R13, R13 - - // !!! R15 = &src[candidate + 4] - ADD $4, R15, R15 - ADD R6, R15, R15 - - // !!! s += 4 - ADD $4, R7, R7 - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI inlineExtendMatchCmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchBSF - ADD $8, R15, R15 - ADD $8, R7, R7 - B inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - B inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS inlineExtendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVD R7, R3 - SUB R12, R3, R3 - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 - BLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $64, R3, R3 - B inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - MOVW $64, R2 - CMPW R2, R3 - BLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $60, R3, R3 - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - MOVW $12, R2 - CMPW R2, R3 - BGE inlineEmitCopyStep3 - MOVW $2048, R2 - CMPW R2, R11 - BGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $8, R11, R11 - LSLW $5, R11, R11 - SUBW $4, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - B inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBW $1, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVD R7, R10 - - // if s >= sLimit { goto emitRemainder } - MOVD R7, R3 - SUB R6, R3, R3 - CMP R3, R9 - BLS emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
- - // x := load64(src, s-1) - MOVD -1(R7), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // table[prevHash] = uint16(s-1) - MOVD R7, R3 - SUB R6, R3, R3 - SUB $1, R3, R3 - - MOVHU R3, 0(R17)(R11<<1) - - // currHash := hash(uint32(x>>8), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // candidate = int(table[currHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[currHash] = uint16(s) - ADD $1, R3, R3 - MOVHU R3, 0(R17)(R11<<1) - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15), R4 - CMPW R4, R14 - BEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // s++ - ADD $1, R7, R7 - - // break out of the inner1 for loop, i.e. continue the outer loop. - B outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVD src_len+32(FP), R3 - ADD R6, R3, R3 - CMP R3, R10 - BEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVD R8, 8(RSP) - MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD R10, 32(RSP) - SUB R10, R3, R3 - MOVD R3, 40(RSP) - MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVD R8, 88(RSP) - CALL ·emitLiteral(SB) - MOVD 88(RSP), R8 - - // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVD 56(RSP), R1 - ADD R1, R8, R8 - -encodeBlockEnd: - MOVD dst_base+0(FP), R3 - SUB R3, R8, R8 - MOVD R8, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go deleted file mode 100644 index 107c1e71..00000000 --- a/vendor/github.com/golang/snappy/encode_asm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. 
-// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index cd3fcd1e..00000000 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f400..00000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index 9171c972..00000000 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index 2517a287..00000000 --- a/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Gorilla WebSocket - -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. 
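For orientation, a minimal client built on this package dials with the package-level DefaultDialer and then exchanges messages on the returned *Conn; the endpoint URL below is a placeholder:

    package main

    import (
    	"log"

    	"github.com/gorilla/websocket"
    )

    func main() {
    	// Dial performs the opening handshake and returns an established *Conn.
    	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
    	if err != nil {
    		log.Fatal("dial:", err)
    	}
    	defer conn.Close()

    	// Write a text message, then read the peer's reply.
    	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
    		log.Fatal("write:", err)
    	}
    	_, msg, err := conn.ReadMessage()
    	if err != nil {
    		log.Fatal("read:", err)
    	}
    	log.Printf("received: %s", msg)
    }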
- - ---- - -⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** - ---- - -### Documentation - -* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index 2efd8355..00000000 --- a/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptrace" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -var errInvalidCompression = errors.New("websocket: invalid compression negotiation") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. -// -// It is safe to call Dialer's methods concurrently. -type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // NetDialContext specifies the dial function for creating TCP connections. If - // NetDialContext is nil, NetDial is used. - NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. 
If - // NetDialTLSContext is nil, NetDialContext is used. - // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and - // TLSClientConfig is ignored. - NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake - // is done there and TLSClientConfig is ignored. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then a useful default size is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string - - // EnableCompression specifies if the client should attempt to negotiate - // per message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool - - // Jar specifies the cookie jar. - // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar http.CookieJar -} - -// Dial creates a new client connection by calling DialContext with a background context. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - return d.DialContext(context.Background(), urlStr, requestHeader) -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, - HandshakeTimeout: 45 * time.Second, -} - -// nilDialer is dialer to use when receiver is nil. -var nilDialer = *DefaultDialer - -// DialContext creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). -// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// The context will be used in the request and in the Dialer. 
-// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. -func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - if d == nil { - d = &nilDialer - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := url.Parse(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. - return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: http.MethodGet, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - req = req.WithContext(ctx) - - // Set the cookies present in the cookie jar of the dialer - if d.Jar != nil { - for _, cookie := range d.Jar.Cookies(u) { - req.AddCookie(cookie) - } - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. - req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - k == "Sec-Websocket-Extensions" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - case k == "Sec-Websocket-Protocol": - req.Header["Sec-WebSocket-Protocol"] = vs - default: - req.Header[k] = vs - } - } - - if d.EnableCompression { - req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} - } - - if d.HandshakeTimeout != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) - defer cancel() - } - - // Get network dial function. 
- var netDial func(network, add string) (net.Conn, error) - - switch u.Scheme { - case "http": - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - case "https": - if d.NetDialTLSContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialTLSContext(ctx, network, addr) - } - } else if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - default: - return nil, nil, errMalformedURL - } - - if netDial == nil { - netDialer := &net.Dialer{} - netDial = func(network, addr string) (net.Conn, error) { - return netDialer.DialContext(ctx, network, addr) - } - } - - // If needed, wrap the dial function to set the connection deadline. - if deadline, ok := ctx.Deadline(); ok { - forwardDial := netDial - netDial = func(network, addr string) (net.Conn, error) { - c, err := forwardDial(network, addr) - if err != nil { - return nil, err - } - err = c.SetDeadline(deadline) - if err != nil { - c.Close() - return nil, err - } - return c, nil - } - } - - // If needed, wrap the dial function to connect through a proxy. - if d.Proxy != nil { - proxyURL, err := d.Proxy(req) - if err != nil { - return nil, nil, err - } - if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) - if err != nil { - return nil, nil, err - } - netDial = dialer.Dial - } - } - - hostPort, hostNoPort := hostPortNoPort(u) - trace := httptrace.ContextClientTrace(ctx) - if trace != nil && trace.GetConn != nil { - trace.GetConn(hostPort) - } - - netConn, err := netDial("tcp", hostPort) - if trace != nil && trace.GotConn != nil { - trace.GotConn(httptrace.GotConnInfo{ - Conn: netConn, - }) - } - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if u.Scheme == "https" && d.NetDialTLSContext == nil { - // If NetDialTLSContext is set, assume that the TLS handshake has already been done - - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - - if trace != nil && trace.TLSHandshakeStart != nil { - trace.TLSHandshakeStart() - } - err := doHandshake(ctx, tlsConn, cfg) - if trace != nil && trace.TLSHandshakeDone != nil { - trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) - } - - if err != nil { - return nil, nil, err - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - if trace != nil && trace.GotFirstResponseByte != nil { - if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { - trace.GotFirstResponseByte() - } - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - return nil, nil, err - } - - if d.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - d.Jar.SetCookies(u, rc) - } - } - - if resp.StatusCode != 101 || - !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || - !tokenListContainsValue(resp.Header, "Connection", "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. 
- buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - for _, ext := range parseExtensions(resp.Header) { - if ext[""] != "permessage-deflate" { - continue - } - _, snct := ext["server_no_context_takeover"] - _, cnct := ext["client_no_context_takeover"] - if !snct || !cnct { - return nil, resp, errInvalidCompression - } - conn.newCompressionWriter = compressNoContextTakeover - conn.newDecompressionReader = decompressNoContextTakeover - break - } - - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return cfg.Clone() -} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go deleted file mode 100644 index 813ffb1e..00000000 --- a/vendor/github.com/gorilla/websocket/compression.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "compress/flate" - "errors" - "io" - "strings" - "sync" -) - -const ( - minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 - maxCompressionLevel = flate.BestCompression - defaultCompressionLevel = 1 -) - -var ( - flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool - flateReaderPool = sync.Pool{New: func() interface{} { - return flate.NewReader(nil) - }} -) - -func decompressNoContextTakeover(r io.Reader) io.ReadCloser { - const tail = - // Add four bytes as specified in RFC - "\x00\x00\xff\xff" + - // Add final block to squelch unexpected EOF error from flate reader. - "\x01\x00\x00\xff\xff" - - fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) - return &flateReadWrapper{fr} -} - -func isValidCompressionLevel(level int) bool { - return minCompressionLevel <= level && level <= maxCompressionLevel -} - -func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { - p := &flateWriterPools[level-minCompressionLevel] - tw := &truncWriter{w: w} - fw, _ := p.Get().(*flate.Writer) - if fw == nil { - fw, _ = flate.NewWriter(tw, level) - } else { - fw.Reset(tw) - } - return &flateWriteWrapper{fw: fw, tw: tw, p: p} -} - -// truncWriter is an io.Writer that writes all but the last four bytes of the -// stream to another io.Writer. -type truncWriter struct { - w io.WriteCloser - n int - p [4]byte -} - -func (w *truncWriter) Write(p []byte) (int, error) { - n := 0 - - // fill buffer first for simplicity. 
- if w.n < len(w.p) { - n = copy(w.p[w.n:], p) - p = p[n:] - w.n += n - if len(p) == 0 { - return n, nil - } - } - - m := len(p) - if m > len(w.p) { - m = len(w.p) - } - - if nn, err := w.w.Write(w.p[:m]); err != nil { - return n + nn, err - } - - copy(w.p[:], w.p[m:]) - copy(w.p[len(w.p)-m:], p[len(p)-m:]) - nn, err := w.w.Write(p[:len(p)-m]) - return n + nn, err -} - -type flateWriteWrapper struct { - fw *flate.Writer - tw *truncWriter - p *sync.Pool -} - -func (w *flateWriteWrapper) Write(p []byte) (int, error) { - if w.fw == nil { - return 0, errWriteClosed - } - return w.fw.Write(p) -} - -func (w *flateWriteWrapper) Close() error { - if w.fw == nil { - return errWriteClosed - } - err1 := w.fw.Flush() - w.p.Put(w.fw) - w.fw = nil - if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { - return errors.New("websocket: internal error, unexpected bytes at end of flate stream") - } - err2 := w.tw.w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -type flateReadWrapper struct { - fr io.ReadCloser -} - -func (r *flateReadWrapper) Read(p []byte) (int, error) { - if r.fr == nil { - return 0, io.ErrClosedPipe - } - n, err := r.fr.Read(p) - if err == io.EOF { - // Preemptively place the reader back in the pool. This helps with - // scenarios where the application does not call NextReader() soon after - // this final read. - r.Close() - } - return n, err -} - -func (r *flateReadWrapper) Close() error { - if r.fr == nil { - return io.ErrClosedPipe - } - err := r.fr.Close() - flateReaderPool.Put(r.fr) - r.fr = nil - return err -} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 331eebc8..00000000 --- a/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,1230 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" -) - -const ( - // Frame header byte 0 bits from Section 5.2 of RFC 6455 - finalBit = 1 << 7 - rsv1Bit = 1 << 6 - rsv2Bit = 1 << 5 - rsv3Bit = 1 << 4 - - // Frame header byte 1 bits from Section 5.2 of RFC 6455 - maskBit = 1 << 7 - - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. 
Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a pong control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents a close message. -type CloseError struct { - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) - } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. -func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. 
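A typical read loop uses the helper declared just below to treat routine close codes as the normal end of the stream and log anything else; a sketch, assuming an established *Conn named c and an application-defined handle function:

    for {
    	_, message, err := c.ReadMessage()
    	if err != nil {
    		// Going-away and abnormal-closure are expected; anything else is noteworthy.
    		if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
    			log.Printf("read error: %v", err)
    		}
    		break
    	}
    	handle(message) // hypothetical application handler
    }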
-func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this -// interface. The type of the value stored in a pool is not specified. -type BufferPool interface { - // Get gets a value from the pool or returns nil if the pool is empty. - Get() interface{} - // Put adds a value to the pool. - Put(interface{}) -} - -// writePoolData is the type added to the write buffer pool. This wrapper is -// used to prevent applications from peeking at and depending on the values -// added to the pool. -type writePoolData struct{ buf []byte } - -// The Conn type represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan struct{} // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. - writePool BufferPool - writeBufSize int - writeDeadline time.Time - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection - - writeErrMu sync.Mutex - writeErr error - - enableWriteCompression bool - compressionLevel int - newCompressionWriter func(io.WriteCloser, int) io.WriteCloser - - // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - // bytes remaining in current frame. - // set setReadRemaining to safely update this value and prevent overflow - readRemaining int64 - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - handleClose func(int, string) error - readErrCount int - messageReader *messageReader // the current low-level reader - - readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.ReadCloser -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { - - if br == nil { - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } else if readBufferSize < maxControlFramePayloadSize { - // must be large enough for control frame - readBufferSize = maxControlFramePayloadSize - } - br = bufio.NewReaderSize(conn, readBufferSize) - } - - if writeBufferSize <= 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBufferSize += maxFrameHeaderSize - - if writeBuf == nil && writeBufferPool == nil { - writeBuf = make([]byte, writeBufferSize) - } - - mu := make(chan struct{}, 1) - mu <- struct{}{} - c := &Conn{ - isServer: isServer, - br: br, - conn: conn, - mu: mu, - readFinal: true, - writeBuf: writeBuf, - writePool: writeBufferPool, - writeBufSize: writeBufferSize, - enableWriteCompression: true, - compressionLevel: defaultCompressionLevel, - } - c.SetCloseHandler(nil) - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// setReadRemaining tracks the number of bytes remaining on the connection. If n -// overflows, an ErrReadLimit is returned. -func (c *Conn) setReadRemaining(n int64) error { - if n < 0 { - return ErrReadLimit - } - - c.readRemaining = n - return nil -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting -// for a close message. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) - c.writeErrMu.Lock() - if c.writeErr == nil { - c.writeErr = err - } - c.writeErrMu.Unlock() - return err -} - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - c.br.Discard(len(p)) - return p, err -} - -func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { - <-c.mu - defer func() { c.mu <- struct{}{} }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - if len(buf1) == 0 { - _, err = c.conn.Write(buf0) - } else { - err = c.writeBufs(buf0, buf1) - } - if err != nil { - return c.writeFatal(err) - } - if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return nil -} - -func (c *Conn) writeBufs(bufs ...[]byte) error { - b := net.Buffers(bufs) - _, err := b.WriteTo(c.conn) - return err -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. 
-func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) - maskBytes(key, 0, buf[6:]) - } - - d := 1000 * time.Hour - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- struct{}{} }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - c.conn.SetWriteDeadline(deadline) - _, err = c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) - } - return err -} - -// beginMessage prepares a connection and message writer for a new message. -func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { - // Close previous writer if not already closed by the application. It's - // probably better to return an error in this situation, but we cannot - // change this without breaking existing applications. - if c.writer != nil { - c.writer.Close() - c.writer = nil - } - - if !isControl(messageType) && !isData(messageType) { - return errBadWriteOpCode - } - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - mw.c = c - mw.frameType = messageType - mw.pos = maxFrameHeaderSize - - if c.writeBuf == nil { - wpd, ok := c.writePool.Get().(writePoolData) - if ok { - c.writeBuf = wpd.buf - } else { - c.writeBuf = make([]byte, c.writeBufSize) - } - } - return nil -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -// -// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and -// PongMessage) are supported. -func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return nil, err - } - c.writer = &mw - if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - w := c.newCompressionWriter(c.writer, c.compressionLevel) - mw.compress = true - c.writer = w - } - return c.writer, nil -} - -type messageWriter struct { - c *Conn - compress bool // whether next call to flushFrame should set RSV1 - pos int // end of data in writeBuf. - frameType int // type of the current frame. - err error -} - -func (w *messageWriter) endMessage(err error) error { - if w.err != nil { - return err - } - c := w.c - w.err = err - c.writer = nil - if c.writePool != nil { - c.writePool.Put(writePoolData{buf: c.writeBuf}) - c.writeBuf = nil - } - return err -} - -// flushFrame writes buffered data and extra as a frame to the network. The -// final argument indicates that this is the last frame in the message. 
-func (w *messageWriter) flushFrame(final bool, extra []byte) error { - c := w.c - length := w.pos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(w.frameType) && - (!final || length > maxControlFramePayloadSize) { - return w.endMessage(errInvalidControlFrame) - } - - b0 := byte(w.frameType) - if final { - b0 |= finalBit - } - if w.compress { - b0 |= rsv1Bit - } - w.compress = false - - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. - framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) - if len(extra) > 0 { - return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - if err != nil { - return w.endMessage(err) - } - - if final { - w.endMessage(errWriteClosed) - return nil - } - - // Setup for next frame. - w.pos = maxFrameHeaderSize - w.frameType = continuationFrame - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.pos - if n <= 0 { - if err := w.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.pos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. 
- err := w.flushFrame(false, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if w.err != nil { - return 0, w.err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if w.err != nil { - return 0, w.err - } - for { - if w.pos == len(w.c.writeBuf) { - err = w.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.pos:]) - w.pos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if w.err != nil { - return w.err - } - return w.flushFrame(true, nil) -} - -// WritePreparedMessage writes prepared message into connection. -func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { - frameType, frameData, err := pm.frame(prepareKey{ - isServer: c.isServer, - compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), - compressionLevel: c.compressionLevel, - }) - if err != nil { - return err - } - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - err = c.write(frameType, c.writeDeadline, frameData, nil) - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - return err -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. -func (c *Conn) WriteMessage(messageType int, data []byte) error { - - if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { - // Fast path with no allocations and single frame. - - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return err - } - n := copy(c.writeBuf[mw.pos:], data) - mw.pos += n - data = data[n:] - return mw.flushFrame(true, data) - } - - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. - // To aid debugging, collect and report all errors in the first two bytes - // of the header. 
- - var errors []string - - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - frameType := int(p[0] & 0xf) - final := p[0]&finalBit != 0 - rsv1 := p[0]&rsv1Bit != 0 - rsv2 := p[0]&rsv2Bit != 0 - rsv3 := p[0]&rsv3Bit != 0 - mask := p[1]&maskBit != 0 - c.setReadRemaining(int64(p[1] & 0x7f)) - - c.readDecompress = false - if rsv1 { - if c.newDecompressionReader != nil { - c.readDecompress = true - } else { - errors = append(errors, "RSV1 set") - } - } - - if rsv2 { - errors = append(errors, "RSV2 set") - } - - if rsv3 { - errors = append(errors, "RSV3 set") - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - errors = append(errors, "len > 125 for control") - } - if !final { - errors = append(errors, "FIN not set on control") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - errors = append(errors, "data before FIN") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - errors = append(errors, "continuation after FIN") - } - c.readFinal = final - default: - errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) - } - - if mask != c.isServer { - errors = append(errors, "bad MASK") - } - - if len(errors) > 0 { - return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) - } - - // 3. Read and parse frame length as per - // https://tools.ietf.org/html/rfc6455#section-5.2 - // - // The length of the "Payload data", in bytes: if 0-125, that is the payload - // length. - // - If 126, the following 2 bytes interpreted as a 16-bit unsigned - // integer are the payload length. - // - If 127, the following 8 bytes interpreted as - // a 64-bit unsigned integer (the most significant bit MUST be 0) are the - // payload length. Multibyte length quantities are expressed in network byte - // order. - - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { - return noFrame, err - } - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - - if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { - return noFrame, err - } - } - - // 4. Handle frame masking. - - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - // Don't allow readLength to overflow in the presence of a large readRemaining - // counter. - if c.readLength < 0 { - return noFrame, ErrReadLimit - } - - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - c.setReadRemaining(0) - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. 
- - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - if err := c.handleClose(closeCode, closeText); err != nil { - return noFrame, err - } - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - data := FormatCloseMessage(CloseProtocolError, message) - if len(data) > maxControlFramePayloadSize { - data = data[:maxControlFramePayloadSize] - } - c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - // Close previous reader, only relevant for decompression. - if c.reader != nil { - c.reader.Close() - c.reader = nil - } - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - - if frameType == TextMessage || frameType == BinaryMessage { - c.messageReader = &messageReader{c} - c.reader = c.messageReader - if c.readDecompress { - c.reader = c.newDecompressionReader(c.reader) - } - return frameType, c.reader, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. To help application developers detect - // this error, panic on repeated reads to the failed connection. 
- c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - rem := c.readRemaining - rem -= int64(n) - c.setReadRemaining(rem) - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -func (r *messageReader) Close() error { - return nil -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = ioutil.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a -// message exceeds the limit, the connection sends a close message to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// CloseHandler returns the current close handler -func (c *Conn) CloseHandler() func(code int, text string) error { - return c.handleClose -} - -// SetCloseHandler sets the handler for close messages received from the peer. -// The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close -// message back to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// close messages as described in the section on Control Messages above. -// -// The connection read methods return a CloseError when a close message is -// received. Most applications should handle close messages as part of their -// normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close message back to -// the peer. 
-func (c *Conn) SetCloseHandler(h func(code int, text string) error) { - if h == nil { - h = func(code int, text string) error { - message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) - return nil - } - } - c.handleClose = h -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING message application data. The default -// ping handler sends a pong to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// ping messages as described in the section on Control Messages above. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG message application data. The default -// pong handler does nothing. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// pong messages as described in the section on Control Messages above. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// EnableWriteCompression enables and disables write compression of -// subsequent text and binary messages. This function is a noop if -// compression was not negotiated with the peer. -func (c *Conn) EnableWriteCompression(enable bool) { - c.enableWriteCompression = enable -} - -// SetCompressionLevel sets the flate compression level for subsequent text and -// binary messages. This function is a noop if compression was not negotiated -// with the peer. See the compress/flate package for a description of -// compression levels. -func (c *Conn) SetCompressionLevel(level int) error { - if !isValidCompressionLevel(level) { - return errors.New("websocket: invalid compression level") - } - c.compressionLevel = level - return nil -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -// An empty message is returned for code CloseNoStatusReceived. -func FormatCloseMessage(closeCode int, text string) []byte { - if closeCode == CloseNoStatusReceived { - // Return empty message because it's illegal to send - // CloseNoStatusReceived. Return non-nil value in case application - // checks for nil. 
- return []byte{} - } - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index 8db0cef9..00000000 --- a/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application calls -// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// log.Println(err) -// return -// } -// if err := conn.WriteMessage(messageType, p); err != nil { -// log.Println(err) -// return -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. 
-// -// Connections handle received close messages by calling the handler function -// set with the SetCloseHandler method and by returning a *CloseError from the -// NextReader, ReadMessage or the message Read method. The default close -// handler sends a close message to the peer. -// -// Connections handle received ping messages by calling the handler function -// set with the SetPingHandler method. The default ping handler sends a pong -// message to the peer. -// -// Connections handle received pong messages by calling the handler function -// set with the SetPongHandler method. The default pong handler does nothing. -// If an application sends ping messages, then the application should set a -// pong handler to receive the corresponding pong. -// -// The control message handler functions are called from the NextReader, -// ReadMessage and message reader Read methods. The default close and ping -// handlers can block these methods for a short time when the handler writes to -// the connection. -// -// The application must read the connection to process close, ping and pong -// messages sent from the peer. If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and -// that no more than one goroutine calls the read methods (NextReader, -// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) -// concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and the Origin host is -// not equal to the Host request header. -// -// The deprecated package-level Upgrade function does not perform origin -// checking. The application is responsible for checking the Origin header -// before calling the Upgrade function. -// -// Buffers -// -// Connections buffer network input and output to reduce the number -// of system calls when reading or writing messages. -// -// Write buffers are also used for constructing WebSocket frames. See RFC 6455, -// Section 5 for a discussion of message framing. A WebSocket frame header is -// written to the network each time a write buffer is flushed to the network. -// Decreasing the size of the write buffer can increase the amount of framing -// overhead on the connection. -// -// The buffer sizes in bytes are specified by the ReadBufferSize and -// WriteBufferSize fields in the Dialer and Upgrader. 
The Dialer uses a default -// size of 4096 when a buffer size field is set to zero. The Upgrader reuses -// buffers created by the HTTP server when a buffer size field is set to zero. -// The HTTP server buffers have a size of 4096 at the time of this writing. -// -// The buffer sizes do not limit the size of a message that can be read or -// written by a connection. -// -// Buffers are held for the lifetime of the connection by default. If the -// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the -// write buffer only when writing a message. -// -// Applications should tune the buffer sizes to balance memory use and -// performance. Increasing the buffer size uses more memory, but can reduce the -// number of system calls to read or write the network. In the case of writing, -// increasing the buffer size can reduce the number of frame headers written to -// the network. -// -// Some guidelines for setting buffer parameters are: -// -// Limit the buffer sizes to the maximum expected message size. Buffers larger -// than the largest message do not provide any benefit. -// -// Depending on the distribution of message sizes, setting the buffer size to -// a value less than the maximum expected message size can greatly reduce memory -// use with a small impact on performance. Here's an example: If 99% of the -// messages are smaller than 256 bytes and the maximum message size is 512 -// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls -// than a buffer size of 512 bytes. The memory savings is 50%. -// -// A write buffer pool is useful when the application has a modest number -// writes over a large number of connections. when buffers are pooled, a larger -// buffer size has a reduced impact on total memory use and has the benefit of -// reducing system calls and frame overhead. -// -// Compression EXPERIMENTAL -// -// Per message compression extensions (RFC 7692) are experimentally supported -// by this package in a limited capacity. Setting the EnableCompression option -// to true in Dialer or Upgrader will attempt to negotiate per message deflate -// support. -// -// var upgrader = websocket.Upgrader{ -// EnableCompression: true, -// } -// -// If compression was successfully negotiated with the connection's peer, any -// message received in compressed form will be automatically decompressed. -// All Read methods will return uncompressed bytes. -// -// Per message compression of messages written to a connection can be enabled -// or disabled by calling the corresponding Conn method: -// -// conn.EnableWriteCompression(false) -// -// Currently this package does not support compression with "context takeover". -// This means that messages must be compressed and decompressed in isolation, -// without retaining sliding window or dictionary state across messages. For -// more details refer to RFC 7692. -// -// Use of compression is experimental and may result in decreased performance. -package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go deleted file mode 100644 index c64f8c82..00000000 --- a/vendor/github.com/gorilla/websocket/join.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "io" - "strings" -) - -// JoinMessages concatenates received messages to create a single io.Reader. 
-// The string term is appended to each message. The returned reader does not -// support concurrent calls to the Read method. -func JoinMessages(c *Conn, term string) io.Reader { - return &joinReader{c: c, term: term} -} - -type joinReader struct { - c *Conn - term string - r io.Reader -} - -func (r *joinReader) Read(p []byte) (int, error) { - if r.r == nil { - var err error - _, r.r, err = r.c.NextReader() - if err != nil { - return 0, err - } - if r.term != "" { - r.r = io.MultiReader(r.r, strings.NewReader(r.term)) - } - } - n, err := r.r.Read(p) - if err == io.EOF { - err = nil - r.r = nil - } - return n, err -} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go deleted file mode 100644 index dc2c1f64..00000000 --- a/vendor/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" - "io" -) - -// WriteJSON writes the JSON encoding of v as a message. -// -// Deprecated: Use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v as a message. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. -func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// Deprecated: Use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. -func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - err = json.NewDecoder(r).Decode(v) - if err == io.EOF { - // One value is expected in the message. - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go deleted file mode 100644 index d0742bf2..00000000 --- a/vendor/github.com/gorilla/websocket/mask.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -//go:build !appengine -// +build !appengine - -package websocket - -import "unsafe" - -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -func maskBytes(key [4]byte, pos int, b []byte) int { - // Mask one byte at a time for small buffers. - if len(b) < 2*wordSize { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 - } - - // Mask one byte at a time to word boundary. - if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { - n = wordSize - n - for i := range b[:n] { - b[i] ^= key[pos&3] - pos++ - } - b = b[n:] - } - - // Create aligned word size key. 
- var k [wordSize]byte - for i := range k { - k[i] = key[(pos+i)&3] - } - kw := *(*uintptr)(unsafe.Pointer(&k)) - - // Mask one word at a time. - n := (len(b) / wordSize) * wordSize - for i := 0; i < n; i += wordSize { - *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw - } - - // Mask one byte at a time for remaining bytes. - b = b[n:] - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go deleted file mode 100644 index 36250ca7..00000000 --- a/vendor/github.com/gorilla/websocket/mask_safe.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -//go:build appengine -// +build appengine - -package websocket - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go deleted file mode 100644 index c854225e..00000000 --- a/vendor/github.com/gorilla/websocket/prepared.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "net" - "sync" - "time" -) - -// PreparedMessage caches on the wire representations of a message payload. -// Use PreparedMessage to efficiently send a message payload to multiple -// connections. PreparedMessage is especially useful when compression is used -// because the CPU and memory expensive compression operation can be executed -// once for a given set of compression options. -type PreparedMessage struct { - messageType int - data []byte - mu sync.Mutex - frames map[prepareKey]*preparedFrame -} - -// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. -type prepareKey struct { - isServer bool - compress bool - compressionLevel int -} - -// preparedFrame contains data in wire representation. -type preparedFrame struct { - once sync.Once - data []byte -} - -// NewPreparedMessage returns an initialized PreparedMessage. You can then send -// it to connection using WritePreparedMessage method. Valid wire -// representation will be calculated lazily only once for a set of current -// connection options. -func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { - pm := &PreparedMessage{ - messageType: messageType, - frames: make(map[prepareKey]*preparedFrame), - data: data, - } - - // Prepare a plain server frame. - _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) - if err != nil { - return nil, err - } - - // To protect against caller modifying the data argument, remember the data - // copied to the plain server frame. - pm.data = frameData[len(frameData)-len(data):] - return pm, nil -} - -func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { - pm.mu.Lock() - frame, ok := pm.frames[key] - if !ok { - frame = &preparedFrame{} - pm.frames[key] = frame - } - pm.mu.Unlock() - - var err error - frame.once.Do(func() { - // Prepare a frame using a 'fake' connection. - // TODO: Refactor code in conn.go to allow more direct construction of - // the frame. 
- mu := make(chan struct{}, 1) - mu <- struct{}{} - var nc prepareConn - c := &Conn{ - conn: &nc, - mu: mu, - isServer: key.isServer, - compressionLevel: key.compressionLevel, - enableWriteCompression: true, - writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), - } - if key.compress { - c.newCompressionWriter = compressNoContextTakeover - } - err = c.WriteMessage(pm.messageType, pm.data) - frame.data = nc.buf.Bytes() - }) - return pm.messageType, frame.data, err -} - -type prepareConn struct { - buf bytes.Buffer - net.Conn -} - -func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } -func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go deleted file mode 100644 index e0f466b7..00000000 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "encoding/base64" - "errors" - "net" - "net/http" - "net/url" - "strings" -) - -type netDialerFunc func(network, addr string) (net.Conn, error) - -func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { - return fn(network, addr) -} - -func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil - }) -} - -type httpProxyDialer struct { - proxyURL *url.URL - forwardDial func(network, addr string) (net.Conn, error) -} - -func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { - hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.forwardDial(network, hostPort) - if err != nil { - return nil, err - } - - connectHeader := make(http.Header) - if user := hpd.proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - - connectReq := &http.Request{ - Method: http.MethodConnect, - URL: &url.URL{Opaque: addr}, - Host: addr, - Header: connectHeader, - } - - if err := connectReq.Write(conn); err != nil { - conn.Close() - return nil, err - } - - // Read response. It's OK to use and discard buffered reader here becaue - // the remote server does not speak until spoken to. - br := bufio.NewReader(conn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - - if resp.StatusCode != 200 { - conn.Close() - f := strings.SplitN(resp.Status, " ", 2) - return nil, errors.New(f[1]) - } - return conn, nil -} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go deleted file mode 100644 index 24d53b38..00000000 --- a/vendor/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "io" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. 
-type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -// -// It is safe to call Upgrader's methods concurrently. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then buffers allocated by the HTTP server are used. The - // I/O buffer sizes do not limit the size of the messages that can be sent - // or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is not nil, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. If there's no match, then no protocol is - // negotiated (the Sec-Websocket-Protocol header is not included in the - // handshake response). - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, then a safe default is used: return false if the - // Origin request header is present and the origin host is not equal to - // request Host header. - // - // A CheckOrigin function should carefully validate the request origin to - // prevent cross-site request forgery. - CheckOrigin func(r *http.Request) bool - - // EnableCompression specify if the server should attempt to negotiate per - // message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - w.Header().Set("Sec-Websocket-Version", "13") - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
-func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return equalASCIIFold(u.Host, r.Host) -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie). To specify -// subprotocols supported by the server, set Upgrader.Subprotocols directly. -// -// If the upgrade fails, then Upgrade replies to the client with an HTTP error -// response. -func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - const badHandshake = "websocket: the client is not using the websocket protocol: " - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") - } - - if r.Method != http.MethodGet { - return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") - } - - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - // Negotiate PMCE - var compress bool - if u.EnableCompression { - for _, ext := range parseExtensions(r.Header) { - if ext[""] != "permessage-deflate" { - continue - } - compress = true - break - } - } - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err := h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - netConn.Close() - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - var br *bufio.Reader - if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { - // Reuse hijacked buffered 
reader as connection reader. - br = brw.Reader - } - - buf := bufioWriterBuffer(netConn, brw.Writer) - - var writeBuf []byte - if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { - // Reuse hijacked write buffer as connection buffer. - writeBuf = buf - } - - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) - c.subprotocol = subprotocol - - if compress { - c.newCompressionWriter = compressNoContextTakeover - c.newDecompressionReader = decompressNoContextTakeover - } - - // Use larger of hijacked buffer and connection write buffer for header. - p := buf - if len(c.writeBuf) > len(p) { - p = c.writeBuf - } - p = p[:0] - - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) - if c.subprotocol != "" { - p = append(p, "Sec-WebSocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) - } - if compress { - p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) - - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) - } - if _, err = netConn.Write(p); err != nil { - netConn.Close() - return nil, err - } - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// Deprecated: Use websocket.Upgrader instead. -// -// Upgrade does not perform origin checking. The application is responsible for -// checking the Origin header before calling Upgrade. An example implementation -// of the same origin policy check is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", http.StatusForbidden) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. 
-func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. -func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} - -// IsWebSocketUpgrade returns true if the client requested upgrade to the -// WebSocket protocol. -func IsWebSocketUpgrade(r *http.Request) bool { - return tokenListContainsValue(r.Header, "Connection", "upgrade") && - tokenListContainsValue(r.Header, "Upgrade", "websocket") -} - -// bufioReaderSize size returns the size of a bufio.Reader. -func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { - // This code assumes that peek on a reset reader returns - // bufio.Reader.buf[:0]. - // TODO: Use bufio.Reader.Size() after Go 1.10 - br.Reset(originalReader) - if p, err := br.Peek(0); err == nil { - return cap(p) - } - return 0 -} - -// writeHook is an io.Writer that records the last slice passed to it vio -// io.Writer.Write. -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} - -// bufioWriterBuffer grabs the buffer from a bufio.Writer. -func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { - // This code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. 
- var wh writeHook - bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() - - bw.Reset(originalWriter) - - return wh.p[:cap(wh.p)] -} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go deleted file mode 100644 index a62b68cc..00000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.HandshakeContext(ctx); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go deleted file mode 100644 index e1b2b44f..00000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go deleted file mode 100644 index 7bf2f66c..00000000 --- a/vendor/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "io" - "net/http" - "strings" - "unicode/utf8" -) - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -// Token octets per RFC 2616. 
-var isTokenOctet = [256]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -// skipSpace returns a slice of the string s with all leading RFC 2616 linear -// whitespace removed. -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if b := s[i]; b != ' ' && b != '\t' { - break - } - } - return s[i:] -} - -// nextToken returns the leading RFC 2616 token of s and the string following -// the token. -func nextToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if !isTokenOctet[s[i]] { - break - } - } - return s[:i], s[i:] -} - -// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 -// and the string following the token or quoted string. -func nextTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return nextToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} - -// equalASCIIFold returns true if s is equal to t with ASCII case folding as -// defined in RFC 4790. -func equalASCIIFold(s, t string) bool { - for s != "" && t != "" { - sr, size := utf8.DecodeRuneInString(s) - s = s[size:] - tr, size := utf8.DecodeRuneInString(t) - t = t[size:] - if sr == tr { - continue - } - if 'A' <= sr && sr <= 'Z' { - sr = sr + 'a' - 'A' - } - if 'A' <= tr && tr <= 'Z' { - tr = tr + 'a' - 'A' - } - if sr != tr { - return false - } - } - return s == t -} - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains a token equal to value with ASCII case folding. -func tokenListContainsValue(header http.Header, name string, value string) bool { -headers: - for _, s := range header[name] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - s = skipSpace(s) - if s != "" && s[0] != ',' { - continue headers - } - if equalASCIIFold(t, value) { - return true - } - if s == "" { - continue headers - } - s = s[1:] - } - } - return false -} - -// parseExtensions parses WebSocket extensions from a header. 
-func parseExtensions(header http.Header) []map[string]string { - // From RFC 6455: - // - // Sec-WebSocket-Extensions = extension-list - // extension-list = 1#extension - // extension = extension-token *( ";" extension-param ) - // extension-token = registered-token - // registered-token = token - // extension-param = token [ "=" (token | quoted-string) ] - // ;When using the quoted-string syntax variant, the value - // ;after quoted-string unescaping MUST conform to the - // ;'token' ABNF. - - var result []map[string]string -headers: - for _, s := range header["Sec-Websocket-Extensions"] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - ext := map[string]string{"": t} - for { - s = skipSpace(s) - if !strings.HasPrefix(s, ";") { - break - } - var k string - k, s = nextToken(skipSpace(s[1:])) - if k == "" { - continue headers - } - s = skipSpace(s) - var v string - if strings.HasPrefix(s, "=") { - v, s = nextTokenOrQuoted(skipSpace(s[1:])) - s = skipSpace(s) - } - if s != "" && s[0] != ',' && s[0] != ';' { - continue headers - } - ext[k] = v - } - if s != "" && s[0] != ',' { - continue headers - } - result = append(result, ext) - if s == "" { - continue headers - } - s = s[1:] - } - } - return result -} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b..00000000 --- a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. 
-func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. 
-func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. 
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) 
- - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) - } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go index afd924a1..f8ba7198 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go @@ -18,7 +18,7 @@ Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC c linear backoff with 10% jitter. For chained interceptors, the retry interceptor will call every interceptor that follows it -whenever when a retry happens. +whenever a retry happens. Please see examples for more advanced use. 
*/ diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go index 62d83120..003bbd90 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go @@ -5,8 +5,8 @@ package grpc_retry import ( "context" - "fmt" "io" + "strconv" "sync" "time" @@ -136,7 +136,6 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto type serverStreamingRetryingStream struct { grpc.ClientStream bufferedSends []interface{} // single message that the client can sen - receivedGood bool // indicates whether any prior receives were successful wasClosedSend bool // indicates that CloseSend was closed parentCtx context.Context callOpts *options @@ -209,17 +208,8 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { } func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { - s.mu.RLock() - wasGood := s.receivedGood - s.mu.RUnlock() err := s.getStream().RecvMsg(m) if err == nil || err == io.EOF { - s.mu.Lock() - s.receivedGood = true - s.mu.Unlock() - return false, err - } else if wasGood { - // previous RecvMsg in the stream succeeded, no retry logic should interfere return false, err } if isContextError(err) { @@ -303,7 +293,7 @@ func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) } if attempt > 0 && callOpts.includeHeader { - mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10)) ctx = mdClone.ToOutgoing(ctx) } return ctx diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go index 1c60585d..15225d71 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc/metadata" ) -// NiceMD is a convenience wrapper definiting extra functions on the metadata. +// NiceMD is a convenience wrapper defining extra functions on the metadata. type NiceMD metadata.MD // ExtractIncoming extracts an inbound metadata from the server-side context. @@ -39,7 +39,7 @@ func ExtractOutgoing(ctx context.Context) NiceMD { // Clone performs a *deep* copy of the metadata.MD. // -// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed // all keys get copied. func (m NiceMD) Clone(copiedKeys ...string) NiceMD { newMd := NiceMD(metadata.Pairs()) @@ -61,7 +61,7 @@ func (m NiceMD) Clone(copiedKeys ...string) NiceMD { newMd[k] = make([]string, len(vv)) copy(newMd[k], vv) } - return NiceMD(newMd) + return newMd } // ToOutgoing sets the given NiceMD as a client-side context for dispatching. 
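For illustration only (not part of this patch): a minimal sketch of how the grpc_retry interceptor touched in the hunks above is typically wired into a gRPC client. The target address, retry count, and backoff values below are placeholder assumptions, not values taken from this change; the option constructors used (WithMax, WithBackoff, WithCodes, BackoffLinearWithJitter) are the ones exported by go-grpc-middleware v1.

// Illustrative sketch only: wiring the grpc_retry client interceptors.
// Address, retry count, and backoff values are placeholder assumptions.
package main

import (
	"log"
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func main() {
	// Mirror the defaults described in retry/doc.go: retry on
	// ResourceExhausted and Unavailable with linear backoff and 10% jitter.
	opts := []grpc_retry.CallOption{
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffLinearWithJitter(100*time.Millisecond, 0.10)),
		grpc_retry.WithCodes(codes.ResourceExhausted, codes.Unavailable),
	}

	conn, err := grpc.Dial(
		"localhost:4646", // placeholder endpoint
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(opts...)),
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(opts...)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}

As the doc.go hunk above notes, when interceptors are chained the retry interceptor re-invokes every interceptor that follows it on each retry attempt.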
diff --git a/vendor/github.com/hashicorp/consul-template/config/auth.go b/vendor/github.com/hashicorp/consul-template/config/auth.go deleted file mode 100644 index c60a85b1..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/auth.go +++ /dev/null @@ -1,141 +0,0 @@ -package config - -import ( - "errors" - "fmt" - "strings" -) - -// ErrAuthStringEmpty is the error returned with authentication is provided, -// but empty. -var ErrAuthStringEmpty = errors.New("auth: cannot be empty") - -// AuthConfig is the HTTP basic authentication data. -// Skip passwords in json output that is used for logging. -type AuthConfig struct { - Enabled *bool `mapstructure:"enabled"` - Username *string `mapstructure:"username"` - Password *string `mapstructure:"password" json:"-"` -} - -// DefaultAuthConfig is the default configuration. -func DefaultAuthConfig() *AuthConfig { - return &AuthConfig{} -} - -// ParseAuthConfig parses the auth into username:password. -func ParseAuthConfig(s string) (*AuthConfig, error) { - if s == "" { - return nil, ErrAuthStringEmpty - } - - var a AuthConfig - - if strings.Contains(s, ":") { - split := strings.SplitN(s, ":", 2) - a.Username = String(split[0]) - a.Password = String(split[1]) - } else { - a.Username = String(s) - } - - return &a, nil -} - -// Copy returns a deep copy of this configuration. -func (c *AuthConfig) Copy() *AuthConfig { - if c == nil { - return nil - } - - var o AuthConfig - o.Enabled = c.Enabled - o.Username = c.Username - o.Password = c.Password - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *AuthConfig) Merge(o *AuthConfig) *AuthConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Username != nil { - r.Username = o.Username - } - - if o.Password != nil { - r.Password = o.Password - } - - return r -} - -// Finalize ensures there no nil pointers. -func (c *AuthConfig) Finalize() { - if c.Enabled == nil { - c.Enabled = Bool(false || - StringPresent(c.Username) || - StringPresent(c.Password)) - } - if c.Username == nil { - c.Username = String("") - } - - if c.Password == nil { - c.Password = String("") - } - - if c.Enabled == nil { - c.Enabled = Bool(*c.Username != "" || *c.Password != "") - } -} - -// GoString defines the printable version of this struct. -func (c *AuthConfig) GoString() string { - if c == nil { - return "(*AuthConfig)(nil)" - } - - return fmt.Sprintf("&AuthConfig{"+ - "Enabled:%s, "+ - "Username:%s, "+ - "Password:%s"+ - "}", - BoolGoString(c.Enabled), - StringGoString(c.Username), - StringGoString(c.Password), - ) -} - -// String is the string representation of this authentication. If authentication -// is not enabled, this returns the empty string. The username and password will -// be separated by a colon. 
-func (c *AuthConfig) String() string { - if !BoolVal(c.Enabled) { - return "" - } - - if c.Password != nil { - return fmt.Sprintf("%s:%s", StringVal(c.Username), StringVal(c.Password)) - } - - return StringVal(c.Username) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/config.go b/vendor/github.com/hashicorp/consul-template/config/config.go deleted file mode 100644 index 8f8bb24a..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/config.go +++ /dev/null @@ -1,714 +0,0 @@ -package config - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "syscall" - "time" - - "github.com/hashicorp/consul-template/signals" - "github.com/hashicorp/hcl" - homedir "github.com/mitchellh/go-homedir" - "github.com/mitchellh/mapstructure" - - "github.com/pkg/errors" -) - -const ( - // DefaultLogLevel is the default logging level. - DefaultLogLevel = "WARN" - - // DefaultMaxStale is the default staleness permitted. This enables stale - // queries by default for performance reasons. - DefaultMaxStale = 2 * time.Second - - // DefaultReloadSignal is the default signal for reload. - DefaultReloadSignal = syscall.SIGHUP - - // DefaultKillSignal is the default signal for termination. - DefaultKillSignal = syscall.SIGINT - - // DefaultBlockQueryWaitTime is amount of time in seconds to do a blocking query for - DefaultBlockQueryWaitTime = 60 * time.Second -) - -// homePath is the location to the user's home directory. -var homePath, _ = homedir.Dir() - -// Config is used to configure Consul Template -type Config struct { - // Consul is the configuration for connecting to a Consul cluster. - Consul *ConsulConfig `mapstructure:"consul"` - - // Dedup is used to configure the dedup settings - Dedup *DedupConfig `mapstructure:"deduplicate"` - - // DefaultDelims is used to configure the default delimiters for templates - DefaultDelims *DefaultDelims `mapstructure:"default_delimiters"` - - // Exec is the configuration for exec/supervise mode. - Exec *ExecConfig `mapstructure:"exec"` - - // KillSignal is the signal to listen for a graceful terminate event. - KillSignal *os.Signal `mapstructure:"kill_signal"` - - // LogLevel is the level with which to log for this config. - LogLevel *string `mapstructure:"log_level"` - - // FileLog is the configuration for file logging. - FileLog *LogFileConfig `mapstructure:"log_file"` - - // MaxStale is the maximum amount of time for staleness from Consul as given - // by LastContact. If supplied, Consul Template will query all servers instead - // of just the leader. - MaxStale *time.Duration `mapstructure:"max_stale"` - - // PidFile is the path on disk where a PID file should be written containing - // this processes PID. - PidFile *string `mapstructure:"pid_file"` - - // ReloadSignal is the signal to listen for a reload event. - ReloadSignal *os.Signal `mapstructure:"reload_signal"` - - // Syslog is the configuration for syslog. - Syslog *SyslogConfig `mapstructure:"syslog"` - - // Templates is the list of templates. - Templates *TemplateConfigs `mapstructure:"template"` - - // TemplateErrFatal determines whether template errors should cause the - // process to exit, or just log and continue. - TemplateErrFatal *bool `mapstructure:"template_error_fatal"` - - // Vault is the configuration for connecting to a vault server. - Vault *VaultConfig `mapstructure:"vault"` - - // Nomad is the configuration for connecting to a Nomad agent. 
- Nomad *NomadConfig `mapstructure:"nomad"` - - // Wait is the quiescence timers. - Wait *WaitConfig `mapstructure:"wait"` - - // Additional command line options - // Run once, executing each template exactly once, and exit - Once bool - - // ParseOnly prevents any rendering and only loads the templates for - // checking well formedness. - ParseOnly bool - - // BlockQueryWaitTime is amount of time in seconds to do a blocking query for - BlockQueryWaitTime *time.Duration `mapstructure:"block_query_wait"` -} - -// Copy returns a deep copy of the current configuration. This is useful because -// the nested data structures may be shared. -func (c *Config) Copy() *Config { - if c == nil { - return nil - } - var o Config - - o.Consul = c.Consul - - if c.Consul != nil { - o.Consul = c.Consul.Copy() - } - - if c.Dedup != nil { - o.Dedup = c.Dedup.Copy() - } - - if c.DefaultDelims != nil { - o.DefaultDelims = c.DefaultDelims.Copy() - } - - if c.Exec != nil { - o.Exec = c.Exec.Copy() - } - - o.KillSignal = c.KillSignal - - o.LogLevel = c.LogLevel - - o.MaxStale = c.MaxStale - - o.PidFile = c.PidFile - - o.ReloadSignal = c.ReloadSignal - - if c.FileLog != nil { - o.FileLog = c.FileLog.Copy() - } - - if c.Syslog != nil { - o.Syslog = c.Syslog.Copy() - } - - if c.Templates != nil { - o.Templates = c.Templates.Copy() - } - - if c.TemplateErrFatal != nil { - o.TemplateErrFatal = c.TemplateErrFatal - } - - if c.Vault != nil { - o.Vault = c.Vault.Copy() - } - - if c.Wait != nil { - o.Wait = c.Wait.Copy() - } - - o.Once = c.Once - o.ParseOnly = c.ParseOnly - - o.BlockQueryWaitTime = c.BlockQueryWaitTime - - if c.Nomad != nil { - o.Nomad = c.Nomad.Copy() - } - - return &o -} - -// Merge merges the values in config into this config object. Values in the -// config object overwrite the values in c. 
-func (c *Config) Merge(o *Config) *Config { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Consul != nil { - r.Consul = r.Consul.Merge(o.Consul) - } - - if o.Dedup != nil { - r.Dedup = r.Dedup.Merge(o.Dedup) - } - - if o.DefaultDelims != nil { - r.DefaultDelims = r.DefaultDelims.Merge(o.DefaultDelims) - } - - if o.Exec != nil { - r.Exec = r.Exec.Merge(o.Exec) - } - - if o.KillSignal != nil { - r.KillSignal = o.KillSignal - } - - if o.LogLevel != nil { - r.LogLevel = o.LogLevel - } - - if o.MaxStale != nil { - r.MaxStale = o.MaxStale - } - - if o.PidFile != nil { - r.PidFile = o.PidFile - } - - if o.ReloadSignal != nil { - r.ReloadSignal = o.ReloadSignal - } - - if o.FileLog != nil { - r.FileLog = r.FileLog.Merge(o.FileLog) - } - - if o.Syslog != nil { - r.Syslog = r.Syslog.Merge(o.Syslog) - } - - if o.Templates != nil { - r.Templates = r.Templates.Merge(o.Templates) - } - - if o.TemplateErrFatal != nil { - r.TemplateErrFatal = o.TemplateErrFatal - } - - if o.Vault != nil { - r.Vault = r.Vault.Merge(o.Vault) - } - - if o.Wait != nil { - r.Wait = r.Wait.Merge(o.Wait) - } - - if o.BlockQueryWaitTime != nil { - r.BlockQueryWaitTime = o.BlockQueryWaitTime - } - - r.Once = o.Once - r.ParseOnly = o.ParseOnly - - if o.Nomad != nil { - r.Nomad = r.Nomad.Merge(o.Nomad) - } - - return r -} - -// Parse parses the given string contents as a config -func Parse(s string) (*Config, error) { - var shadow interface{} - if err := hcl.Decode(&shadow, s); err != nil { - return nil, errors.Wrap(err, "error decoding config") - } - - // Convert to a map and flatten the keys we want to flatten - parsed, ok := shadow.(map[string]interface{}) - if !ok { - return nil, errors.New("error converting config") - } - - flattenKeys(parsed, []string{ - "auth", - "consul", - "consul.auth", - "consul.retry", - "consul.ssl", - "consul.transport", - "deduplicate", - "default_delimiters", - "env", - "exec", - "exec.env", - "log_file", - "nomad", - "nomad.ssl", - "nomad.transport", - "ssl", - "syslog", - "vault", - "vault.retry", - "vault.ssl", - "vault.transport", - "wait", - }) - - // FlattenFlatten keys belonging to the templates. We cannot do this above - // because it is an array of templates. - if templates, ok := parsed["template"].([]map[string]interface{}); ok { - for _, template := range templates { - flattenKeys(template, []string{ - "env", - "exec", - "exec.env", - "wait", - }) - } - } - - // Create a new, empty config - var c Config - - // Use mapstructure to populate the basic config fields - var md mapstructure.Metadata - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - ConsulStringToStructFunc(), - StringToFileModeFunc(), - signals.StringToSignalFunc(), - StringToWaitDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - mapstructure.StringToTimeDurationHookFunc(), - ), - ErrorUnused: true, - Metadata: &md, - Result: &c, - }) - if err != nil { - return nil, errors.Wrap(err, "mapstructure decoder creation failed") - } - if err := decoder.Decode(parsed); err != nil { - return nil, errors.Wrap(err, "mapstructure decode failed") - } - - return &c, nil -} - -// Must returns a config object that must compile. If there are any errors, this -// function will panic. This is most useful in testing or constants. 
-func Must(s string) *Config { - c, err := Parse(s) - if err != nil { - log.Fatal(err) - } - return c -} - -// TestConfig returns a default, finalized config, with the provided -// configuration taking precedence. -func TestConfig(c *Config) *Config { - d := DefaultConfig().Merge(c) - d.Finalize() - return d -} - -// FromFile reads the configuration file at the given path and returns a new -// Config struct with the data populated. -func FromFile(path string) (*Config, error) { - c, err := ioutil.ReadFile(path) - if err != nil { - return nil, errors.Wrap(err, "from file: "+path) - } - - config, err := Parse(string(c)) - if err != nil { - return nil, errors.Wrap(err, "from file: "+path) - } - return config, nil -} - -// FromPath iterates and merges all configuration files in a given -// directory, returning the resulting config. -func FromPath(path string) (*Config, error) { - // Ensure the given filepath exists - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, errors.Wrap(err, "missing file/folder: "+path) - } - - // Check if a file was given or a path to a directory - stat, err := os.Stat(path) - if err != nil { - return nil, errors.Wrap(err, "failed stating file: "+path) - } - - // Recursively parse directories, single load files - if stat.Mode().IsDir() { - // Ensure the given filepath has at least one config file - _, err := ioutil.ReadDir(path) - if err != nil { - return nil, errors.Wrap(err, "failed listing dir: "+path) - } - - // Create a blank config to merge off of - var c *Config - - // Potential bug: Walk does not follow symlinks! - err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { - // If WalkFunc had an error, just return it - if err != nil { - return err - } - - // Do nothing for directories - if info.IsDir() { - return nil - } - - // Parse and merge the config - newConfig, err := FromFile(path) - if err != nil { - return err - } - c = c.Merge(newConfig) - - return nil - }) - - if err != nil { - return nil, errors.Wrap(err, "walk error") - } - - return c, nil - } else if stat.Mode().IsRegular() { - return FromFile(path) - } - - return nil, fmt.Errorf("unknown filetype: %q", stat.Mode().String()) -} - -// GoString defines the printable version of this struct. 
-func (c *Config) GoString() string { - if c == nil { - return "(*Config)(nil)" - } - - return fmt.Sprintf("&Config{"+ - "Consul:%#v, "+ - "Dedup:%#v, "+ - "DefaultDelims:%#v, "+ - "Exec:%#v, "+ - "KillSignal:%s, "+ - "LogLevel:%s, "+ - "MaxStale:%s, "+ - "PidFile:%s, "+ - "ReloadSignal:%s, "+ - "FileLog:%#v, "+ - "Syslog:%#v, "+ - "Templates:%#v, "+ - "TemplateErrFatal:%#v"+ - "Vault:%#v, "+ - "Wait:%#v, "+ - "Once:%#v, "+ - "BlockQueryWaitTime:%#v"+ - "}", - c.Consul, - c.Dedup, - c.DefaultDelims, - c.Exec, - SignalGoString(c.KillSignal), - StringGoString(c.LogLevel), - TimeDurationGoString(c.MaxStale), - StringGoString(c.PidFile), - SignalGoString(c.ReloadSignal), - c.FileLog, - c.Syslog, - c.Templates, - c.TemplateErrFatal, - c.Vault, - c.Wait, - c.Once, - TimeDurationGoString(c.BlockQueryWaitTime), - ) -} - -// Show diff between 2 Configs, useful in tests -func (expected *Config) Diff(actual *Config) string { - var b strings.Builder - fmt.Fprintf(&b, "\n") - ve := reflect.ValueOf(*expected) - va := reflect.ValueOf(*actual) - ct := ve.Type() - - for i := 0; i < ve.NumField(); i++ { - fc := ve.Field(i) - fo := va.Field(i) - if !reflect.DeepEqual(fc.Interface(), fo.Interface()) { - fmt.Fprintf(&b, "%s:\n", ct.Field(i).Name) - fi := fc.Interface() - if _, ok := fi.(fmt.GoStringer); ok { - fmt.Fprintf(&b, "\texp: %#v\n", fc.Interface()) - fmt.Fprintf(&b, "\tact: %#v\n", fo.Interface()) - } else { - fmt.Fprintf(&b, "\texp: %+v\n", fc.Interface()) - fmt.Fprintf(&b, "\tact: %+v\n", fo.Interface()) - } - } - } - - return b.String() -} - -// DefaultConfig returns the default configuration struct. Certain environment -// variables may be set which control the values for the default configuration. -func DefaultConfig() *Config { - return &Config{ - Consul: DefaultConsulConfig(), - Dedup: DefaultDedupConfig(), - DefaultDelims: DefaultDefaultDelims(), - Exec: DefaultExecConfig(), - FileLog: DefaultLogFileConfig(), - Nomad: DefaultNomadConfig(), - Syslog: DefaultSyslogConfig(), - Templates: DefaultTemplateConfigs(), - Vault: DefaultVaultConfig(), - Wait: DefaultWaitConfig(), - } -} - -// Finalize ensures all configuration options have the default values, so it -// is safe to dereference the pointers later down the line. It also -// intelligently tries to activate stanzas that should be "enabled" because -// data was given, but the user did not explicitly add "Enabled: true" to the -// configuration. 
-func (c *Config) Finalize() { - if c == nil { - return - } - if c.Consul == nil { - c.Consul = DefaultConsulConfig() - } - c.Consul.Finalize() - - if c.Dedup == nil { - c.Dedup = DefaultDedupConfig() - } - c.Dedup.Finalize() - - if c.DefaultDelims == nil { - c.DefaultDelims = DefaultDefaultDelims() - } - - if c.Exec == nil { - c.Exec = DefaultExecConfig() - } - c.Exec.Finalize() - - if c.KillSignal == nil { - c.KillSignal = Signal(DefaultKillSignal) - } - - if c.LogLevel == nil { - c.LogLevel = stringFromEnv([]string{ - "CT_LOG", - "CONSUL_TEMPLATE_LOG", - "CONSUL_TEMPLATE_LOG_LEVEL", - }, DefaultLogLevel) - } - - if c.MaxStale == nil { - c.MaxStale = TimeDuration(DefaultMaxStale) - } - - if c.PidFile == nil { - c.PidFile = String("") - } - - if c.ReloadSignal == nil { - c.ReloadSignal = Signal(DefaultReloadSignal) - } - - if c.FileLog == nil { - c.FileLog = DefaultLogFileConfig() - } - c.FileLog.Finalize() - - if c.Nomad == nil { - c.Nomad = DefaultNomadConfig() - } - c.Nomad.Finalize() - - if c.Syslog == nil { - c.Syslog = DefaultSyslogConfig() - } - c.Syslog.Finalize() - - if c.Templates == nil { - c.Templates = DefaultTemplateConfigs() - } - for _, tmpl := range *c.Templates { - if tmpl.ErrFatal == nil { - tmpl.ErrFatal = c.TemplateErrFatal - } - } - c.Templates.Finalize() - - if c.Vault == nil { - c.Vault = DefaultVaultConfig() - } - c.Vault.Finalize() - - if c.Wait == nil { - c.Wait = DefaultWaitConfig() - } - c.Wait.Finalize() - - // disable Wait if -once was specified - if c.Once { - c.Wait = &WaitConfig{Enabled: Bool(false)} - } - - // defaults WaitTime to 60 seconds - if c.BlockQueryWaitTime == nil { - c.BlockQueryWaitTime = TimeDuration(DefaultBlockQueryWaitTime) - } -} - -func stringFromEnv(list []string, def string) *string { - for _, s := range list { - if v := os.Getenv(s); v != "" { - return String(strings.TrimSpace(v)) - } - } - return String(def) -} - -func stringFromFile(list []string, def string) *string { - for _, s := range list { - c, err := ioutil.ReadFile(s) - if err == nil { - return String(strings.TrimSpace(string(c))) - } - } - return String(def) -} - -func antiboolFromEnv(list []string, def bool) *bool { - for _, s := range list { - if v := os.Getenv(s); v != "" { - b, err := strconv.ParseBool(v) - if err == nil { - return Bool(!b) - } - } - } - return Bool(def) -} - -func boolFromEnv(list []string, def bool) *bool { - for _, s := range list { - if v := os.Getenv(s); v != "" { - b, err := strconv.ParseBool(v) - if err == nil { - return Bool(b) - } - } - } - return Bool(def) -} - -// flattenKeys is a function that takes a map[string]interface{} and recursively -// flattens any keys that are a []map[string]interface{} where the key is in the -// given list of keys. -func flattenKeys(m map[string]interface{}, keys []string) { - keyMap := make(map[string]struct{}) - for _, key := range keys { - keyMap[key] = struct{}{} - } - - var flatten func(map[string]interface{}, string) - flatten = func(m map[string]interface{}, parent string) { - for k, v := range m { - // Calculate the map key, since it could include a parent. - mapKey := k - if parent != "" { - mapKey = parent + "." 
+ k - } - - if _, ok := keyMap[mapKey]; !ok { - continue - } - - switch typed := v.(type) { - case []map[string]interface{}: - if len(typed) > 0 { - last := typed[len(typed)-1] - flatten(last, mapKey) - m[k] = last - } else { - m[k] = nil - } - case map[string]interface{}: - flatten(typed, mapKey) - m[k] = typed - default: - m[k] = v - } - } - } - - flatten(m, "") -} diff --git a/vendor/github.com/hashicorp/consul-template/config/consul.go b/vendor/github.com/hashicorp/consul-template/config/consul.go deleted file mode 100644 index 8ace7265..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/consul.go +++ /dev/null @@ -1,205 +0,0 @@ -package config - -import "fmt" - -// ConsulConfig contains the configurations options for connecting to a -// Consul cluster. -type ConsulConfig struct { - // Address is the address of the Consul server. It may be an IP or FQDN. - Address *string - - // Namespace is the Consul namespace to use for reading/writing. This can - // also be set via the CONSUL_NAMESPACE environment variable. - Namespace *string `mapstructure:"namespace"` - - // Auth is the HTTP basic authentication for communicating with Consul. - Auth *AuthConfig `mapstructure:"auth"` - - // Retry is the configuration for specifying how to behave on failure. - Retry *RetryConfig `mapstructure:"retry"` - - // SSL indicates we should use a secure connection while talking to - // Consul. This requires Consul to be configured to serve HTTPS. - SSL *SSLConfig `mapstructure:"ssl"` - - // Token is the token to communicate with Consul securely. - Token *string - - // TokenFile is the path to a token to communicate with Consul securely. - TokenFile *string `mapstructure:"token_file"` - - // Transport configures the low-level network connection details. - Transport *TransportConfig `mapstructure:"transport"` -} - -// DefaultConsulConfig returns a configuration that is populated with the -// default values. -func DefaultConsulConfig() *ConsulConfig { - return &ConsulConfig{ - Auth: DefaultAuthConfig(), - Retry: DefaultRetryConfig(), - SSL: DefaultSSLConfig(), - Transport: DefaultTransportConfig(), - } -} - -// Copy returns a deep copy of this configuration. -func (c *ConsulConfig) Copy() *ConsulConfig { - if c == nil { - return nil - } - - var o ConsulConfig - - o.Address = c.Address - - o.Namespace = c.Namespace - - if c.Auth != nil { - o.Auth = c.Auth.Copy() - } - - if c.Retry != nil { - o.Retry = c.Retry.Copy() - } - - if c.SSL != nil { - o.SSL = c.SSL.Copy() - } - - o.Token = c.Token - o.TokenFile = c.TokenFile - - if c.Transport != nil { - o.Transport = c.Transport.Copy() - } - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. 
-func (c *ConsulConfig) Merge(o *ConsulConfig) *ConsulConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Address != nil { - r.Address = o.Address - } - - if o.Namespace != nil { - r.Namespace = o.Namespace - } - - if o.Auth != nil { - r.Auth = r.Auth.Merge(o.Auth) - } - - if o.Retry != nil { - r.Retry = r.Retry.Merge(o.Retry) - } - - if o.SSL != nil { - r.SSL = r.SSL.Merge(o.SSL) - } - - if o.Token != nil { - r.Token = o.Token - } - - if o.TokenFile != nil { - r.TokenFile = o.TokenFile - } - - if o.Transport != nil { - r.Transport = r.Transport.Merge(o.Transport) - } - - return r -} - -// Finalize ensures there no nil pointers. -func (c *ConsulConfig) Finalize() { - if c.Address == nil { - c.Address = stringFromEnv([]string{ - "CONSUL_HTTP_ADDR", - }, "") - } - - if c.Namespace == nil { - c.Namespace = stringFromEnv([]string{"CONSUL_NAMESPACE"}, "") - } - - if c.Auth == nil { - c.Auth = DefaultAuthConfig() - } - c.Auth.Finalize() - - if c.Retry == nil { - c.Retry = DefaultRetryConfig() - } - c.Retry.Finalize() - - if c.SSL == nil { - c.SSL = DefaultSSLConfig() - } - c.SSL.Finalize() - - if c.Token == nil { - c.Token = stringFromEnv([]string{ - "CONSUL_TOKEN", - "CONSUL_HTTP_TOKEN", - }, "") - } - - if c.TokenFile == nil { - c.TokenFile = stringFromEnv([]string{ - "CONSUL_TOKEN_FILE", - "CONSUL_HTTP_TOKEN_FILE", - }, "") - } - - if c.Transport == nil { - c.Transport = DefaultTransportConfig() - } - c.Transport.Finalize() -} - -// GoString defines the printable version of this struct. -func (c *ConsulConfig) GoString() string { - if c == nil { - return "(*ConsulConfig)(nil)" - } - - return fmt.Sprintf("&ConsulConfig{"+ - "Address:%s, "+ - "Namespace:%s, "+ - "Auth:%#v, "+ - "Retry:%#v, "+ - "SSL:%#v, "+ - "Token:%t, "+ - "TokenFile:%s, "+ - "Transport:%#v"+ - "}", - StringGoString(c.Address), - StringGoString(c.Namespace), - c.Auth, - c.Retry, - c.SSL, - StringPresent(c.Token), - StringGoString(c.TokenFile), - c.Transport, - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/convert.go b/vendor/github.com/hashicorp/consul-template/config/convert.go deleted file mode 100644 index 8ddd5669..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/convert.go +++ /dev/null @@ -1,210 +0,0 @@ -package config - -import ( - "fmt" - "os" - "time" - - "github.com/hashicorp/consul-template/signals" -) - -// Bool returns a pointer to the given bool. -func Bool(b bool) *bool { - return &b -} - -// BoolVal returns the value of the boolean at the pointer, or false if the -// pointer is nil. -func BoolVal(b *bool) bool { - if b == nil { - return false - } - return *b -} - -// BoolGoString returns the value of the boolean for printing in a string. -func BoolGoString(b *bool) string { - if b == nil { - return "(*bool)(nil)" - } - return fmt.Sprintf("%t", *b) -} - -// BoolPresent returns a boolean indicating if the pointer is nil, or if the -// pointer is pointing to the zero value.. -func BoolPresent(b *bool) bool { - if b == nil { - return false - } - return true -} - -// FileMode returns a pointer to the given os.FileMode. -func FileMode(o os.FileMode) *os.FileMode { - return &o -} - -// FileModeVal returns the value of the os.FileMode at the pointer, or 0 if the -// pointer is nil. -func FileModeVal(o *os.FileMode) os.FileMode { - if o == nil { - return 0 - } - return *o -} - -// FileModeGoString returns the value of the os.FileMode for printing in a -// string. 
-func FileModeGoString(o *os.FileMode) string { - if o == nil { - return "(*os.FileMode)(nil)" - } - return fmt.Sprintf("%q", *o) -} - -// FileModePresent returns a boolean indicating if the pointer is nil, or if -// the pointer is pointing to the zero value. -func FileModePresent(o *os.FileMode) bool { - if o == nil { - return false - } - return *o != 0 -} - -// Float64 returns a pointer to the given float64 -func Float64(f float64) *float64 { - return &f -} - -// FloatGoString returns the value of the float for printing in a string. -func FloatGoString(f *float64) string { - if f == nil { - return "(*float64)(nil)" - } - return fmt.Sprintf("%f", *f) -} - -// Int returns a pointer to the given int. -func Int(i int) *int { - return &i -} - -// IntVal returns the value of the int at the pointer, or 0 if the pointer is -// nil. -func IntVal(i *int) int { - if i == nil { - return 0 - } - return *i -} - -// IntGoString returns the value of the int for printing in a string. -func IntGoString(i *int) string { - if i == nil { - return "(*int)(nil)" - } - return fmt.Sprintf("%d", *i) -} - -// IntPresent returns a boolean indicating if the pointer is nil, or if the -// pointer is pointing to the zero value. -func IntPresent(i *int) bool { - if i == nil { - return false - } - return *i != 0 -} - -// Signal returns a pointer to the given os.Signal. -func Signal(s os.Signal) *os.Signal { - return &s -} - -// SignalVal returns the value of the os.Signal at the pointer, or 0 if the -// pointer is nil. -func SignalVal(s *os.Signal) os.Signal { - if s == nil { - return (os.Signal)(nil) - } - return *s -} - -// SignalGoString returns the value of the os.Signal for printing in a string. -func SignalGoString(s *os.Signal) string { - if s == nil { - return "(*os.Signal)(nil)" - } - if *s == nil { - return "" - } - return fmt.Sprintf("%q", *s) -} - -// SignalPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. -func SignalPresent(s *os.Signal) bool { - if s == nil { - return false - } - return *s != signals.SIGNULL -} - -// String returns a pointer to the given string. -func String(s string) *string { - return &s -} - -// StringVal returns the value of the string at the pointer, or "" if the -// pointer is nil. -func StringVal(s *string) string { - if s == nil { - return "" - } - return *s -} - -// StringGoString returns the value of the string for printing in a string. -func StringGoString(s *string) string { - if s == nil { - return "(*string)(nil)" - } - return fmt.Sprintf("%q", *s) -} - -// StringPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. -func StringPresent(s *string) bool { - if s == nil { - return false - } - return *s != "" -} - -// TimeDuration returns a pointer to the given time.Duration. -func TimeDuration(t time.Duration) *time.Duration { - return &t -} - -// TimeDurationVal returns the value of the string at the pointer, or 0 if the -// pointer is nil. -func TimeDurationVal(t *time.Duration) time.Duration { - if t == nil { - return time.Duration(0) - } - return *t -} - -// TimeDurationGoString returns the value of the time.Duration for printing in a -// string. -func TimeDurationGoString(t *time.Duration) string { - if t == nil { - return "(*time.Duration)(nil)" - } - return fmt.Sprintf("%s", t) -} - -// TimeDurationPresent returns a boolean indicating if the pointer is nil, or if the pointer is pointing to the zero value.. 
-func TimeDurationPresent(t *time.Duration) bool { - if t == nil { - return false - } - return *t != 0 -} diff --git a/vendor/github.com/hashicorp/consul-template/config/dedup.go b/vendor/github.com/hashicorp/consul-template/config/dedup.go deleted file mode 100644 index 57c6f24a..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/dedup.go +++ /dev/null @@ -1,150 +0,0 @@ -package config - -import ( - "fmt" - "time" -) - -const ( - // DefaultDedupPrefix is the default prefix used for deduplication mode. - DefaultDedupPrefix = "consul-template/dedup/" - - // DefaultDedupTTL is the default TTL for deduplicate mode. - DefaultDedupTTL = 15 * time.Second - - // DefaultDedupMaxStale is the default max staleness for the deduplication - // manager. - DefaultDedupMaxStale = DefaultMaxStale - - // DefaultDedupBlockQueryWaitTime is the default amount of time to do a blocking query for the deduplication - DefaultDedupBlockQueryWaitTime = 60 * time.Second -) - -// DedupConfig is used to enable the de-duplication mode, which depends -// on electing a leader per-template and watching of a key. This is used -// to reduce the cost of many instances of CT running the same template. -type DedupConfig struct { - // Controls if deduplication mode is enabled - Enabled *bool `mapstructure:"enabled"` - - // MaxStale is the maximum amount of time to allow for stale queries. - MaxStale *time.Duration `mapstructure:"max_stale"` - - // Controls the KV prefix used. Defaults to defaultDedupPrefix - Prefix *string `mapstructure:"prefix"` - - // TTL is the Session TTL used for lock acquisition, defaults to 15 seconds. - TTL *time.Duration `mapstructure:"ttl"` - - // BlockQueryWaitTime is amount of time to do a blocking query for, defaults to 60 seconds. - BlockQueryWaitTime *time.Duration `mapstructure:"block_query_wait"` -} - -// DefaultDedupConfig returns a configuration that is populated with the -// default values. -func DefaultDedupConfig() *DedupConfig { - return &DedupConfig{} -} - -// Copy returns a deep copy of this configuration. -func (c *DedupConfig) Copy() *DedupConfig { - if c == nil { - return nil - } - - var o DedupConfig - o.Enabled = c.Enabled - o.MaxStale = c.MaxStale - o.Prefix = c.Prefix - o.TTL = c.TTL - o.BlockQueryWaitTime = c.BlockQueryWaitTime - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *DedupConfig) Merge(o *DedupConfig) *DedupConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.MaxStale != nil { - r.MaxStale = o.MaxStale - } - - if o.Prefix != nil { - r.Prefix = o.Prefix - } - - if o.TTL != nil { - r.TTL = o.TTL - } - - if o.BlockQueryWaitTime != nil { - r.BlockQueryWaitTime = o.BlockQueryWaitTime - } - - return r -} - -// Finalize ensures there no nil pointers. 
-func (c *DedupConfig) Finalize() { - if c.Enabled == nil { - c.Enabled = Bool(false || - TimeDurationPresent(c.MaxStale) || - StringPresent(c.Prefix) || - TimeDurationPresent(c.TTL) || - TimeDurationPresent(c.BlockQueryWaitTime)) - } - - if c.MaxStale == nil { - c.MaxStale = TimeDuration(DefaultDedupMaxStale) - } - - if c.Prefix == nil { - c.Prefix = String(DefaultDedupPrefix) - } - - if c.TTL == nil { - c.TTL = TimeDuration(DefaultDedupTTL) - } - - if c.BlockQueryWaitTime == nil { - c.BlockQueryWaitTime = TimeDuration(DefaultDedupBlockQueryWaitTime) - } -} - -// GoString defines the printable version of this struct. -func (c *DedupConfig) GoString() string { - if c == nil { - return "(*DedupConfig)(nil)" - } - return fmt.Sprintf("&DedupConfig{"+ - "Enabled:%s, "+ - "MaxStale:%s, "+ - "Prefix:%s, "+ - "TTL:%s, "+ - "BlockQueryWaitTime:%s"+ - "}", - BoolGoString(c.Enabled), - TimeDurationGoString(c.MaxStale), - StringGoString(c.Prefix), - TimeDurationGoString(c.TTL), - TimeDurationGoString(c.BlockQueryWaitTime), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/default_delimiters.go b/vendor/github.com/hashicorp/consul-template/config/default_delimiters.go deleted file mode 100644 index bc2a28f8..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/default_delimiters.go +++ /dev/null @@ -1,53 +0,0 @@ -package config - -// DefaultDelims is used to configure the default delimiters used for all templates -type DefaultDelims struct { - // Left is the left delimiter for templating - Left *string `mapstructure:"left"` - - // Right is the right delimiter for templating - Right *string `mapstructure:"right"` -} - -// DefaultDefaultDelims returns the default DefaultDelims -func DefaultDefaultDelims() *DefaultDelims { - return &DefaultDelims{} -} - -// Copy returns a copy of the DefaultDelims -func (c *DefaultDelims) Copy() *DefaultDelims { - if c == nil { - return nil - } - - return &DefaultDelims{ - Left: c.Left, - Right: c.Right, - } -} - -// Merge merges the DefaultDelims -func (c *DefaultDelims) Merge(o *DefaultDelims) *DefaultDelims { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Left != nil { - r.Left = o.Left - } - - if o.Right != nil { - r.Right = o.Right - } - - return r -} diff --git a/vendor/github.com/hashicorp/consul-template/config/env.go b/vendor/github.com/hashicorp/consul-template/config/env.go deleted file mode 100644 index 0d93aef6..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/env.go +++ /dev/null @@ -1,270 +0,0 @@ -package config - -import ( - "fmt" - "os" - "path/filepath" - "strings" -) - -// EnvConfig is an embeddable struct for things that accept environment -// variable filtering. You should not use this directly and it is only public -// for mapstructure's decoding. -type EnvConfig struct { - // Denylist specifies a list of environment variables to explicitly - // exclude from the list of environment variables populated to the child. - // If both Allowlist and Denylist are provided, Denylist takes - // precedence over the values in Allowlist. - Denylist []string `mapstructure:"denylist"` - - // DenylistDeprecated is the backward compatible option for Denylist for - // configuration supported by v0.25.0 and older. This should not be used - // directly, use Denylist instead. Values from this are combined to - // Denylist in Finalize(). 
- DenylistDeprecated []string `mapstructure:"blacklist" json:"-"` - - // CustomEnv specifies custom environment variables to pass to the child - // process. These are provided programmatically, override any environment - // variables of the same name, are ignored from allowlist/denylist, and - // are still included even if PristineEnv is set to true. - Custom []string `mapstructure:"custom"` - - // PristineEnv specifies if the child process should inherit the parent's - // environment. - Pristine *bool `mapstructure:"pristine"` - - // Allowlist specifies a list of environment variables to exclusively - // include in the list of environment variables populated to the child. - Allowlist []string `mapstructure:"allowlist"` - - // AllowlistDeprecated is the backward compatible option for Allowlist for - // configuration supported by v0.25.0 and older. This should not be used - // directly, use Allowlist instead. Values from this are combined to - // Allowlist in Finalize(). - AllowlistDeprecated []string `mapstructure:"whitelist" json:"-"` -} - -// DefaultEnvConfig returns a configuration that is populated with the -// default values. -func DefaultEnvConfig() *EnvConfig { - return &EnvConfig{} -} - -// Copy returns a deep copy of this configuration. -func (c *EnvConfig) Copy() *EnvConfig { - if c == nil { - return nil - } - - var o EnvConfig - - if c.Denylist != nil { - o.Denylist = append([]string{}, c.Denylist...) - } - - if c.DenylistDeprecated != nil { - o.DenylistDeprecated = append([]string{}, c.DenylistDeprecated...) - } - - if c.Custom != nil { - o.Custom = append([]string{}, c.Custom...) - } - - o.Pristine = c.Pristine - - if c.Allowlist != nil { - o.Allowlist = append([]string{}, c.Allowlist...) - } - - if c.AllowlistDeprecated != nil { - o.AllowlistDeprecated = append([]string{}, c.AllowlistDeprecated...) - } - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *EnvConfig) Merge(o *EnvConfig) *EnvConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Denylist != nil { - r.Denylist = append(r.Denylist, o.Denylist...) - } - - if o.DenylistDeprecated != nil { - r.DenylistDeprecated = append(r.DenylistDeprecated, o.DenylistDeprecated...) - } - - if o.Custom != nil { - r.Custom = append(r.Custom, o.Custom...) - } - - if o.Pristine != nil { - r.Pristine = o.Pristine - } - - if o.Allowlist != nil { - r.Allowlist = append(r.Allowlist, o.Allowlist...) - } - - if o.AllowlistDeprecated != nil { - r.AllowlistDeprecated = append(r.AllowlistDeprecated, o.AllowlistDeprecated...) - } - - return r -} - -// Env calculates and returns the finalized environment for this exec -// configuration. It takes into account pristine, custom environment, allowlist, -// and denylist values. -func (c *EnvConfig) Env() []string { - // In pristine mode, just return the custom environment. If the user did not - // specify a custom environment, just return the empty slice to force an - // empty environment. We cannot return nil here because the later call to - // os/exec will think we want to inherit the parent. 
- if BoolVal(c.Pristine) { - if len(c.Custom) > 0 { - return c.Custom - } - return []string{} - } - - // Pull all the key-value pairs out of the environment - environ := os.Environ() - keys := make([]string, len(environ)) - env := make(map[string]string, len(environ)) - for i, v := range environ { - list := strings.SplitN(v, "=", 2) - keys[i] = list[0] - env[list[0]] = list[1] - } - - // anyGlobMatch is a helper function which checks if any of the given globs - // match the string. - anyGlobMatch := func(s string, patterns []string) bool { - for _, pattern := range patterns { - if matched, _ := filepath.Match(pattern, s); matched { - return true - } - } - return false - } - - // Pull out any envvars that match the allowlist. - // Combining lists on each reference may be slightly inefficient but this - // allows for out of order method calls, not requiring the config to be - // finalized first. - allowlist := combineLists(c.Allowlist, c.AllowlistDeprecated) - if len(allowlist) > 0 { - newKeys := make([]string, 0, len(keys)) - for _, k := range keys { - if anyGlobMatch(k, allowlist) { - newKeys = append(newKeys, k) - } - } - keys = newKeys - } - - // Remove any envvars that match the denylist. - // Combining lists on each reference may be slightly inefficient but this - // allows for out of order method calls, not requiring the config to be - // finalized first. - denylist := combineLists(c.Denylist, c.DenylistDeprecated) - if len(denylist) > 0 { - newKeys := make([]string, 0, len(keys)) - for _, k := range keys { - if !anyGlobMatch(k, denylist) { - newKeys = append(newKeys, k) - } - } - keys = newKeys - } - - // Build the final list using only the filtered keys. - finalEnv := make([]string, 0, len(keys)+len(c.Custom)) - for _, k := range keys { - finalEnv = append(finalEnv, k+"="+env[k]) - } - - // Append remaining custom environment. - finalEnv = append(finalEnv, c.Custom...) - - return finalEnv -} - -// Finalize ensures there no nil pointers. -func (c *EnvConfig) Finalize() { - if c.Denylist == nil && c.DenylistDeprecated == nil { - c.Denylist = []string{} - c.DenylistDeprecated = []string{} - } else { - c.Denylist = combineLists(c.Denylist, c.DenylistDeprecated) - } - - if c.Custom == nil { - c.Custom = []string{} - } - - if c.Pristine == nil { - c.Pristine = Bool(false) - } - - if c.Allowlist == nil && c.AllowlistDeprecated == nil { - c.Allowlist = []string{} - c.AllowlistDeprecated = []string{} - } else { - c.Allowlist = combineLists(c.Allowlist, c.AllowlistDeprecated) - } -} - -// GoString defines the printable version of this struct. -func (c *EnvConfig) GoString() string { - if c == nil { - return "(*EnvConfig)(nil)" - } - - return fmt.Sprintf("&EnvConfig{"+ - "Denylist:%v, "+ - "Custom:%v, "+ - "Pristine:%s, "+ - "Allowlist:%v"+ - "}", - combineLists(c.Denylist, c.DenylistDeprecated), - c.Custom, - BoolGoString(c.Pristine), - combineLists(c.Allowlist, c.AllowlistDeprecated), - ) -} - -// combineLists makes a new list that combines 2 lists by adding values from -// the second list without removing any duplicates from the first. 
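Env above filters the parent environment with shell-style globs via filepath.Match: the allowlist is applied first, denylist matches are removed second, Custom entries are appended at the end, and pristine mode short-circuits to just the Custom list. combineLists, whose doc comment closes the block above, folds the deprecated whitelist/blacklist spellings into the new fields. A sketch of the filtering (the variable names and values are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	env := &config.EnvConfig{
		// Keep only PATH and anything starting with CONSUL_ ...
		Allowlist: []string{"PATH", "CONSUL_*"},
		// ... but never pass the token through, even though it matches CONSUL_*.
		Denylist: []string{"CONSUL_HTTP_TOKEN"},
		// Custom entries bypass the filters entirely.
		Custom: []string{"APP_MODE=supervised"},
	}
	env.Finalize()

	// The result depends on the parent environment: with CONSUL_HTTP_ADDR and
	// CONSUL_HTTP_TOKEN exported, only the former survives the filters, and
	// APP_MODE=supervised is appended last.
	for _, kv := range env.Env() {
		fmt.Println(kv)
	}
}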
-func combineLists(a, b []string) []string { - combined := make([]string, len(a), len(a)+len(b)) - m := make(map[string]bool) - for i, v := range a { - m[v] = true - combined[i] = v - } - - for _, v := range b { - if !m[v] { - combined = append(combined, v) - } - } - - return combined -} diff --git a/vendor/github.com/hashicorp/consul-template/config/exec.go b/vendor/github.com/hashicorp/consul-template/config/exec.go deleted file mode 100644 index 4482009c..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/exec.go +++ /dev/null @@ -1,220 +0,0 @@ -package config - -import ( - "fmt" - "os" - "syscall" - "time" -) - -const ( - // DefaultExecKillSignal is the default signal to send to the process to - // tell it to gracefully terminate. - DefaultExecKillSignal = syscall.SIGINT - - // DefaultExecKillTimeout is the maximum amount of time to wait for the - // process to gracefully terminate before force-killing it. - DefaultExecKillTimeout = 30 * time.Second - - // DefaultExecTimeout is the default amount of time to wait for a - // command to exit. By default, this is disabled, which means the command - // is allowed to run for an infinite amount of time. - DefaultExecTimeout = 0 * time.Second -) - -// DefaultExecReloadSignal is the default signal to send to the process to -// tell it to reload its configuration. -var DefaultExecReloadSignal = (os.Signal)(nil) - -// ExecConfig is used to configure the application when it runs in -// exec/supervise mode. -type ExecConfig struct { - // Command is the command to execute and watch as a child process. - Command commandList `mapstructure:"command"` - - // Enabled controls if this exec is enabled. - Enabled *bool `mapstructure:"enabled"` - - // EnvConfig is the environmental customizations. - Env *EnvConfig `mapstructure:"env"` - - // KillSignal is the signal to send to the command to kill it gracefully. - KillSignal *os.Signal `mapstructure:"kill_signal"` - - // KillTimeout is the amount of time to give the process to cleanup before - // hard-killing it. - KillTimeout *time.Duration `mapstructure:"kill_timeout"` - - // ReloadSignal is the signal to send to the child process when a template - // changes. This tells the child process that templates have - ReloadSignal *os.Signal `mapstructure:"reload_signal"` - - // Splay is the maximum amount of random time to wait to signal or kill the - // process. By default this is disabled, but it can be set to low values to - // reduce the "thundering herd" problem where all tasks are restarted at once. - Splay *time.Duration `mapstructure:"splay"` - - // Timeout is the maximum amount of time to wait for a command to complete. - // By default, this is 0, which means "wait forever". - Timeout *time.Duration `mapstructure:"timeout"` -} - -// commandList is a []string with a common method for testing for content -type commandList []string - -func (c commandList) Empty() bool { - return len(c) == 0 || c[0] == "" -} - -// DefaultExecConfig returns a configuration that is populated with the -// default values. -func DefaultExecConfig() *ExecConfig { - return &ExecConfig{ - Env: DefaultEnvConfig(), - } -} - -// Copy returns a deep copy of this configuration. 
-func (c *ExecConfig) Copy() *ExecConfig { - if c == nil { - return nil - } - - var o ExecConfig - - o.Command = c.Command - - o.Enabled = c.Enabled - - if c.Env != nil { - o.Env = c.Env.Copy() - } - - o.KillSignal = c.KillSignal - - o.KillTimeout = c.KillTimeout - - o.ReloadSignal = c.ReloadSignal - - o.Splay = c.Splay - - o.Timeout = c.Timeout - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *ExecConfig) Merge(o *ExecConfig) *ExecConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Command != nil { - r.Command = o.Command - } - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Env != nil { - r.Env = r.Env.Merge(o.Env) - } - - if o.KillSignal != nil { - r.KillSignal = o.KillSignal - } - - if o.KillTimeout != nil { - r.KillTimeout = o.KillTimeout - } - - if o.ReloadSignal != nil { - r.ReloadSignal = o.ReloadSignal - } - - if o.Splay != nil { - r.Splay = o.Splay - } - - if o.Timeout != nil { - r.Timeout = o.Timeout - } - - return r -} - -// Finalize ensures there no nil pointers. -func (c *ExecConfig) Finalize() { - if c.Enabled == nil { - c.Enabled = Bool(!c.Command.Empty()) - } - - if c.Command == nil { - c.Command = []string{} - } - - if c.Env == nil { - c.Env = DefaultEnvConfig() - } - c.Env.Finalize() - - if c.KillSignal == nil { - c.KillSignal = Signal(DefaultExecKillSignal) - } - - if c.KillTimeout == nil { - c.KillTimeout = TimeDuration(DefaultExecKillTimeout) - } - - if c.ReloadSignal == nil { - c.ReloadSignal = Signal(DefaultExecReloadSignal) - } - - if c.Splay == nil { - c.Splay = TimeDuration(0 * time.Second) - } - - if c.Timeout == nil { - c.Timeout = TimeDuration(DefaultExecTimeout) - } -} - -// GoString defines the printable version of this struct. 
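Finalize above encodes the supervision defaults: the exec block is enabled only when a command is present, SIGINT is sent for graceful shutdown with a 30-second kill timeout, there is no reload signal, and splay and timeout are zero. The sketch below shows how a splay value could be used to stagger signalling; the random-sleep logic is illustrative and not part of this package:

package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	exec := &config.ExecConfig{
		Command: []string{"app", "-serve"},
		Splay:   config.TimeDuration(5 * time.Second),
	}
	exec.Finalize() // Enabled becomes true because Command is non-empty

	// Illustrative only: sleep a random duration in [0, Splay) before
	// signalling the child, to avoid a thundering herd of restarts.
	splay := config.TimeDurationVal(exec.Splay)
	if splay > 0 {
		wait := time.Duration(rand.Int63n(int64(splay)))
		fmt.Printf("waiting %s before sending %v\n", wait, *exec.KillSignal)
		time.Sleep(wait)
	}
}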
-func (c *ExecConfig) GoString() string { - if c == nil { - return "(*ExecConfig)(nil)" - } - - return fmt.Sprintf("&ExecConfig{"+ - "Command:%s, "+ - "Enabled:%s, "+ - "Env:%#v, "+ - "KillSignal:%s, "+ - "KillTimeout:%s, "+ - "ReloadSignal:%s, "+ - "Splay:%s, "+ - "Timeout:%s"+ - "}", - c.Command, - BoolGoString(c.Enabled), - c.Env, - SignalGoString(c.KillSignal), - TimeDurationGoString(c.KillTimeout), - SignalGoString(c.ReloadSignal), - TimeDurationGoString(c.Splay), - TimeDurationGoString(c.Timeout), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/logfile.go b/vendor/github.com/hashicorp/consul-template/config/logfile.go deleted file mode 100644 index 8016557e..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/logfile.go +++ /dev/null @@ -1,127 +0,0 @@ -package config - -import ( - "fmt" - "time" - - "github.com/hashicorp/consul-template/version" -) - -var ( - // DefaultLogFileName is the default filename if the user didn't specify one - // which means that the user specified a directory to log to - DefaultLogFileName = fmt.Sprintf("%s.log", version.Name) - - // DefaultLogRotateDuration is the default time taken by the agent to rotate logs - DefaultLogRotateDuration = 24 * time.Hour -) - -type LogFileConfig struct { - // LogFilePath is the path to the file the logs get written to - LogFilePath *string `mapstructure:"path"` - - // LogRotateBytes is the maximum number of bytes that should be written to a log - // file - LogRotateBytes *int `mapstructure:"log_rotate_bytes"` - - // LogRotateDuration is the time after which log rotation needs to be performed - LogRotateDuration *time.Duration `mapstructure:"log_rotate_duration"` - - // LogRotateMaxFiles is the maximum number of log file archives to keep - LogRotateMaxFiles *int `mapstructure:"log_rotate_max_files"` -} - -// DefaultLogFileConfig returns a configuration that is populated with the -// default values. -func DefaultLogFileConfig() *LogFileConfig { - return &LogFileConfig{} -} - -// Copy returns a deep copy of this configuration. -func (c *LogFileConfig) Copy() *LogFileConfig { - if c == nil { - return nil - } - - var o LogFileConfig - o.LogFilePath = c.LogFilePath - o.LogRotateBytes = c.LogRotateBytes - o.LogRotateDuration = c.LogRotateDuration - o.LogRotateMaxFiles = c.LogRotateMaxFiles - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *LogFileConfig) Merge(o *LogFileConfig) *LogFileConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.LogFilePath != nil { - r.LogFilePath = o.LogFilePath - } - - if o.LogRotateBytes != nil { - r.LogRotateBytes = o.LogRotateBytes - } - - if o.LogRotateDuration != nil { - r.LogRotateDuration = o.LogRotateDuration - } - - if o.LogRotateMaxFiles != nil { - r.LogRotateMaxFiles = o.LogRotateMaxFiles - } - - return r -} - -// Finalize ensures there no nil pointers. 
-func (c *LogFileConfig) Finalize() { - if c.LogFilePath == nil { - c.LogFilePath = String("") - } - - if c.LogRotateBytes == nil { - c.LogRotateBytes = Int(0) - } - - if c.LogRotateDuration == nil { - c.LogRotateDuration = TimeDuration(DefaultLogRotateDuration) - } - - if c.LogRotateMaxFiles == nil { - c.LogRotateMaxFiles = Int(0) - } -} - -// GoString defines the printable version of this struct. -func (c *LogFileConfig) GoString() string { - if c == nil { - return "(*LogFileConfig)(nil)" - } - - return fmt.Sprintf("&LogFileConfig{"+ - "LogFilePath:%s, "+ - "LogRotateBytes:%s, "+ - "LogRotateDuration:%s, "+ - "LogRotateMaxFiles:%s, "+ - "}", - StringGoString(c.LogFilePath), - IntGoString(c.LogRotateBytes), - TimeDurationGoString(c.LogRotateDuration), - IntGoString(c.LogRotateMaxFiles), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go b/vendor/github.com/hashicorp/consul-template/config/mapstructure.go deleted file mode 100644 index 505f6207..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/mapstructure.go +++ /dev/null @@ -1,78 +0,0 @@ -package config - -import ( - "log" - "os" - "reflect" - "strconv" - - "github.com/mitchellh/mapstructure" -) - -// StringToFileModeFunc returns a function that converts strings to os.FileMode -// value. This is designed to be used with mapstructure for parsing out a -// filemode value. -func StringToFileModeFunc() mapstructure.DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}, - ) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(os.FileMode(0)) { - return data, nil - } - - // Convert it by parsing - v, err := strconv.ParseUint(data.(string), 8, 12) - if err != nil { - return data, err - } - return os.FileMode(v), nil - } -} - -// StringToWaitDurationHookFunc returns a function that converts strings to wait -// value. This is designed to be used with mapstructure for parsing out a wait -// value. -func StringToWaitDurationHookFunc() mapstructure.DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}, - ) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(WaitConfig{}) { - return data, nil - } - - // Convert it by parsing - return ParseWaitConfig(data.(string)) - } -} - -// ConsulStringToStructFunc checks if the value set for the key should actually -// be a struct and sets the appropriate value in the struct. This is for -// backwards-compatability with older versions of Consul Template. -func ConsulStringToStructFunc() mapstructure.DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}, - ) (interface{}, error) { - if t == reflect.TypeOf(ConsulConfig{}) && f.Kind() == reflect.String { - log.Println("[WARN] consul now accepts a stanza instead of a string. " + - "Update your configuration files and change consul = \"\" to " + - "consul { } instead.") - return &ConsulConfig{ - Address: String(data.(string)), - }, nil - } - - return data, nil - } -} diff --git a/vendor/github.com/hashicorp/consul-template/config/nomad.go b/vendor/github.com/hashicorp/consul-template/config/nomad.go deleted file mode 100644 index 8accc35e..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/nomad.go +++ /dev/null @@ -1,193 +0,0 @@ -package config - -import "fmt" - -// NomadConfig is the configuration for connecting to a Nomad agent. -type NomadConfig struct { - // Address is the URI to the Nomad agent. 
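The decode hooks in mapstructure.go above are designed to be composed into a mapstructure decoder so that plain strings in the configuration ("0644" for a file mode, a wait string such as "5s:10s", a bare Consul address) become typed values while decoding. A sketch of how such a hook is typically wired up; the fileSettings struct here is illustrative, not part of the package:

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/consul-template/config"
	"github.com/mitchellh/mapstructure"
)

type fileSettings struct {
	Perms os.FileMode `mapstructure:"perms"`
}

func main() {
	var out fileSettings
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Hooks run in order; each can pass the value through untouched.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			config.StringToFileModeFunc(),
		),
		Result: &out,
	})
	if err != nil {
		panic(err)
	}

	// "0644" is parsed as an octal permission string by StringToFileModeFunc.
	if err := dec.Decode(map[string]interface{}{"perms": "0644"}); err != nil {
		panic(err)
	}
	fmt.Printf("%o\n", out.Perms) // 644
}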
- Address *string `mapstructure:"address"` - - // Enabled controls whether the Nomad integration is active. - Enabled *bool `mapstructure:"enabled"` - - // Namespace is the Nomad namespace to use. This can also be set via - // the NOMAD_NAMESPACE environment variable. - Namespace *string `mapstructure:"namespace"` - - // SSL indicates we should use a secure connection while talking - // to Nomad. - SSL *SSLConfig `mapstructure:"ssl"` - - // Token is the Nomad ACL token to use with API requests. This can also - // be set via the NOMAD_TOKEN environment variable. - Token *string `mapstructure:"token" json:"-"` - - // AuthUsername and AuthPassword are the HTTP Basic Auth username and - // password to use when authenticating with the Nomad API. - AuthUsername *string `mapstructure:"auth_username"` - AuthPassword *string `mapstructure:"auth_password"` - - // Transport configures the low-level network connection details. - Transport *TransportConfig `mapstructure:"transport"` - - // Retry is the configuration for specifying how to behave on failure. - Retry *RetryConfig `mapstructure:"retry"` -} - -func DefaultNomadConfig() *NomadConfig { - return &NomadConfig{ - SSL: DefaultSSLConfig(), - Transport: DefaultTransportConfig(), - } -} - -// Copy returns a deep copy of this configuration. -func (n *NomadConfig) Copy() *NomadConfig { - if n == nil { - return nil - } - - var o NomadConfig - - o.Address = n.Address - o.Enabled = n.Enabled - o.Namespace = n.Namespace - o.SSL = n.SSL.Copy() - o.Token = n.Token - o.AuthUsername = n.AuthUsername - o.AuthPassword = n.AuthPassword - o.Transport = n.Transport.Copy() - o.Retry = n.Retry.Copy() - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (n *NomadConfig) Merge(o *NomadConfig) *NomadConfig { - if n == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return n.Copy() - } - - r := n.Copy() - - if o.Address != nil { - r.Address = o.Address - } - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Namespace != nil { - r.Namespace = o.Namespace - } - - if o.SSL != nil { - r.SSL = r.SSL.Merge(o.SSL) - } - - if o.Token != nil { - r.Token = o.Token - } - - if o.AuthUsername != nil { - r.AuthUsername = o.AuthUsername - } - - if o.AuthPassword != nil { - r.AuthPassword = o.AuthPassword - } - - if o.Transport != nil { - r.Transport = r.Transport.Merge(o.Transport) - } - - if o.Retry != nil { - r.Retry = r.Retry.Merge(o.Retry) - } - - return r -} - -// Finalize ensures there no nil pointers. 
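NomadConfig's Finalize, which follows, resolves its defaults from the environment: NOMAD_ADDR for the address, NOMAD_NAMESPACE for the namespace, NOMAD_TOKEN for the token, and the integration is enabled only when an address or a custom dialer is present. It calls stringFromEnv, a package helper defined outside this file; a plausible reconstruction of that helper, for reference only, is:

package config

import (
	"os"
	"strings"
)

// stringFromEnv returns a pointer to the first non-empty value found among
// the given environment variables, falling back to def. This mirrors how
// Finalize resolves NOMAD_ADDR, NOMAD_NAMESPACE, and NOMAD_TOKEN; the real
// definition lives elsewhere in the package and may differ in detail.
func stringFromEnv(list []string, def string) *string {
	for _, e := range list {
		if v := strings.TrimSpace(os.Getenv(e)); v != "" {
			return String(v)
		}
	}
	return String(def)
}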
-func (n *NomadConfig) Finalize() { - if n.Address == nil { - n.Address = stringFromEnv([]string{"NOMAD_ADDR"}, "") - } - - if n.Enabled == nil { - // Enable if there's an address or custom dialer - customDialer := n.Transport != nil && n.Transport.CustomDialer != nil - addressPresent := n.Address != nil && *n.Address != "" - n.Enabled = Bool(addressPresent || customDialer) - } - - if n.Namespace == nil { - n.Namespace = stringFromEnv([]string{"NOMAD_NAMESPACE"}, "") - } - - if n.SSL == nil { - n.SSL = DefaultSSLConfig() - } - n.SSL.Finalize() - - if n.Token == nil { - n.Token = stringFromEnv([]string{"NOMAD_TOKEN"}, "") - } - - if n.AuthUsername == nil { - n.AuthUsername = String("") - } - - if n.AuthPassword == nil { - n.AuthPassword = String("") - } - - if n.Transport == nil { - n.Transport = DefaultTransportConfig() - } - n.Transport.Finalize() - - if n.Retry == nil { - n.Retry = DefaultRetryConfig() - } - n.Retry.Finalize() -} - -func (n *NomadConfig) GoString() string { - if n == nil { - return "(*NomadConfig)(nil)" - } - - return fmt.Sprintf("&NomadConfig{"+ - "Address:%s, "+ - "Enabled:%s, "+ - "Namespace:%s, "+ - "SSL:%#v, "+ - "Token:%s, "+ - "AuthUsername:%s, "+ - "AuthPassword:%s, "+ - "Transport:%#v, "+ - "Retry:%#v, "+ - "}", - StringGoString(n.Address), - BoolGoString(n.Enabled), - StringGoString(n.Namespace), - n.SSL, - StringGoString(n.Token), - StringGoString(n.AuthUsername), - StringGoString(n.AuthPassword), - n.Transport, - n.Retry, - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/retry.go b/vendor/github.com/hashicorp/consul-template/config/retry.go deleted file mode 100644 index 0a4346cf..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/retry.go +++ /dev/null @@ -1,170 +0,0 @@ -package config - -import ( - "fmt" - "math" - "time" -) - -const ( - // DefaultRetryAttempts is the default number of maximum retry attempts. - DefaultRetryAttempts = 12 - - // DefaultRetryBackoff is the default base for the exponential backoff - // algorithm. - DefaultRetryBackoff = 250 * time.Millisecond - - // DefaultRetryMaxBackoff is the default maximum of backoff time - DefaultRetryMaxBackoff = 1 * time.Minute -) - -// RetryFunc is the signature of a function that supports retries. -type RetryFunc func(int) (bool, time.Duration) - -// RetryConfig is a shared configuration for upstreams that support retires on -// failure. -type RetryConfig struct { - // Attempts is the total number of maximum attempts to retry before letting - // the error fall through. - // 0 means unlimited. - Attempts *int - - // Backoff is the base of the exponentialbackoff. This number will be - // multiplied by the next power of 2 on each iteration. - Backoff *time.Duration - - // MaxBackoff is an upper limit to the sleep time between retries - // A MaxBackoff of zero means there is no limit to the exponential growth of the backoff. - MaxBackoff *time.Duration `mapstructure:"max_backoff"` - - // Enabled signals if this retry is enabled. - Enabled *bool -} - -// DefaultRetryConfig returns a configuration that is populated with the -// default values. -func DefaultRetryConfig() *RetryConfig { - return &RetryConfig{} -} - -// Copy returns a deep copy of this configuration. 
-func (c *RetryConfig) Copy() *RetryConfig { - if c == nil { - return nil - } - - var o RetryConfig - - o.Attempts = c.Attempts - - o.Backoff = c.Backoff - - o.MaxBackoff = c.MaxBackoff - - o.Enabled = c.Enabled - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Attempts != nil { - r.Attempts = o.Attempts - } - - if o.Backoff != nil { - r.Backoff = o.Backoff - } - - if o.MaxBackoff != nil { - r.MaxBackoff = o.MaxBackoff - } - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - return r -} - -// RetryFunc returns the retry function associated with this configuration. -func (c *RetryConfig) RetryFunc() RetryFunc { - return func(retry int) (bool, time.Duration) { - if !BoolVal(c.Enabled) { - return false, 0 - } - - if IntVal(c.Attempts) > 0 && retry > IntVal(c.Attempts)-1 { - return false, 0 - } - - baseSleep := TimeDurationVal(c.Backoff) - maxSleep := TimeDurationVal(c.MaxBackoff) - - if maxSleep > 0 { - attemptsTillMaxBackoff := int(math.Log2(maxSleep.Seconds() / baseSleep.Seconds())) - if retry > attemptsTillMaxBackoff { - return true, maxSleep - } - } - - base := math.Pow(2, float64(retry)) - sleep := time.Duration(base) * baseSleep - - return true, sleep - } -} - -// Finalize ensures there no nil pointers. -func (c *RetryConfig) Finalize() { - if c.Attempts == nil { - c.Attempts = Int(DefaultRetryAttempts) - } - - if c.Backoff == nil { - c.Backoff = TimeDuration(DefaultRetryBackoff) - } - - if c.MaxBackoff == nil { - c.MaxBackoff = TimeDuration(DefaultRetryMaxBackoff) - } - - if c.Enabled == nil { - c.Enabled = Bool(true) - } -} - -// GoString defines the printable version of this struct. -func (c *RetryConfig) GoString() string { - if c == nil { - return "(*RetryConfig)(nil)" - } - - return fmt.Sprintf("&RetryConfig{"+ - "Attempts:%s, "+ - "Backoff:%s, "+ - "MaxBackoff:%s, "+ - "Enabled:%s"+ - "}", - IntGoString(c.Attempts), - TimeDurationGoString(c.Backoff), - TimeDurationGoString(c.MaxBackoff), - BoolGoString(c.Enabled), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/ssl.go b/vendor/github.com/hashicorp/consul-template/config/ssl.go deleted file mode 100644 index ab3b77e6..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/ssl.go +++ /dev/null @@ -1,153 +0,0 @@ -package config - -import "fmt" - -const ( - // DefaultSSLVerify is the default value for SSL verification. - DefaultSSLVerify = true -) - -// SSLConfig is the configuration for SSL. -type SSLConfig struct { - CaCert *string `mapstructure:"ca_cert"` - CaPath *string `mapstructure:"ca_path"` - Cert *string `mapstructure:"cert"` - Enabled *bool `mapstructure:"enabled"` - Key *string `mapstructure:"key"` - ServerName *string `mapstructure:"server_name"` - Verify *bool `mapstructure:"verify"` -} - -// DefaultSSLConfig returns a configuration that is populated with the -// default values. -func DefaultSSLConfig() *SSLConfig { - return &SSLConfig{} -} - -// Copy returns a deep copy of this configuration. 
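With the defaults above, RetryFunc sleeps Backoff * 2^retry and caps the result at MaxBackoff, so the waits run 250ms, 500ms, 1s, 2s, and so on up to the one-minute ceiling, and retries stop once the Attempts budget is spent. A worked example against the default configuration:

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	r := config.DefaultRetryConfig()
	r.Finalize() // Attempts=12, Backoff=250ms, MaxBackoff=1m, Enabled=true

	retry := r.RetryFunc()
	for i := 0; i < 13; i++ {
		again, sleep := retry(i)
		fmt.Printf("retry %2d -> again=%-5v sleep=%s\n", i, again, sleep)
	}
	// retry 0 sleeps 250ms, each retry doubles (500ms, 1s, 2s, ...),
	// the sleep is capped at 1m from retry 8 onward, and retry 12
	// returns again=false because the 12-attempt budget is exhausted.
}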
-func (c *SSLConfig) Copy() *SSLConfig { - if c == nil { - return nil - } - - var o SSLConfig - o.CaCert = c.CaCert - o.CaPath = c.CaPath - o.Cert = c.Cert - o.Enabled = c.Enabled - o.Key = c.Key - o.ServerName = c.ServerName - o.Verify = c.Verify - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *SSLConfig) Merge(o *SSLConfig) *SSLConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Cert != nil { - r.Cert = o.Cert - } - - if o.CaCert != nil { - r.CaCert = o.CaCert - } - - if o.CaPath != nil { - r.CaPath = o.CaPath - } - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Key != nil { - r.Key = o.Key - } - - if o.ServerName != nil { - r.ServerName = o.ServerName - } - - if o.Verify != nil { - r.Verify = o.Verify - } - - return r -} - -// Finalize ensures there no nil pointers. -func (c *SSLConfig) Finalize() { - if c.Enabled == nil { - c.Enabled = Bool(false || - StringPresent(c.Cert) || - StringPresent(c.CaCert) || - StringPresent(c.CaPath) || - StringPresent(c.Key) || - StringPresent(c.ServerName) || - BoolPresent(c.Verify)) - } - - if c.Cert == nil { - c.Cert = String("") - } - - if c.CaCert == nil { - c.CaCert = String("") - } - - if c.CaPath == nil { - c.CaPath = String("") - } - - if c.Key == nil { - c.Key = String("") - } - - if c.ServerName == nil { - c.ServerName = String("") - } - - if c.Verify == nil { - c.Verify = Bool(DefaultSSLVerify) - } -} - -// GoString defines the printable version of this struct. -func (c *SSLConfig) GoString() string { - if c == nil { - return "(*SSLConfig)(nil)" - } - - return fmt.Sprintf("&SSLConfig{"+ - "CaCert:%s, "+ - "CaPath:%s, "+ - "Cert:%s, "+ - "Enabled:%s, "+ - "Key:%s, "+ - "ServerName:%s, "+ - "Verify:%s"+ - "}", - StringGoString(c.CaCert), - StringGoString(c.CaPath), - StringGoString(c.Cert), - BoolGoString(c.Enabled), - StringGoString(c.Key), - StringGoString(c.ServerName), - BoolGoString(c.Verify), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/syslog.go b/vendor/github.com/hashicorp/consul-template/config/syslog.go deleted file mode 100644 index 19058c10..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/syslog.go +++ /dev/null @@ -1,106 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/hashicorp/consul-template/version" -) - -const ( - // DefaultSyslogFacility is the default facility to log to. - DefaultSyslogFacility = "LOCAL0" -) - -// DefaultSyslogName is the default app name in syslog. -var DefaultSyslogName = version.Name - -// SyslogConfig is the configuration for syslog. -type SyslogConfig struct { - Enabled *bool `mapstructure:"enabled"` - Facility *string `mapstructure:"facility"` - Name *string `mapstructure:"name"` -} - -// DefaultSyslogConfig returns a configuration that is populated with the -// default values. -func DefaultSyslogConfig() *SyslogConfig { - return &SyslogConfig{} -} - -// Copy returns a deep copy of this configuration. 
-func (c *SyslogConfig) Copy() *SyslogConfig { - if c == nil { - return nil - } - - var o SyslogConfig - o.Enabled = c.Enabled - o.Facility = c.Facility - o.Name = c.Name - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *SyslogConfig) Merge(o *SyslogConfig) *SyslogConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Facility != nil { - r.Facility = o.Facility - } - - if o.Name != nil { - r.Name = o.Name - } - - return r -} - -// Finalize ensures there no nil pointers. -func (c *SyslogConfig) Finalize() { - if c.Enabled == nil { - c.Enabled = Bool(StringPresent(c.Facility) || StringPresent(c.Name)) - } - - if c.Facility == nil { - c.Facility = String(DefaultSyslogFacility) - } - - if c.Name == nil { - c.Name = String(DefaultSyslogName) - } -} - -// GoString defines the printable version of this struct. -func (c *SyslogConfig) GoString() string { - if c == nil { - return "(*SyslogConfig)(nil)" - } - - return fmt.Sprintf("&SyslogConfig{"+ - "Enabled:%s, "+ - "Facility:%s"+ - "Name:%s"+ - "}", - BoolGoString(c.Enabled), - StringGoString(c.Facility), - StringGoString(c.Name), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/template.go b/vendor/github.com/hashicorp/consul-template/config/template.go deleted file mode 100644 index 221bea5b..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/template.go +++ /dev/null @@ -1,556 +0,0 @@ -package config - -import ( - "errors" - "fmt" - "os" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // DefaultTemplateCommandTimeout is the amount of time to wait for a command - // to return. - DefaultTemplateCommandTimeout = 30 * time.Second -) - -var ( - // ErrTemplateStringEmpty is the error returned with the template contents - // are empty. - ErrTemplateStringEmpty = errors.New("template: cannot be empty") - - // configTemplateRe is the pattern to split the config template syntax. - configTemplateRe = regexp.MustCompile("([a-zA-Z]:)?([^:]+)") -) - -// TemplateConfig is a representation of a template on disk, as well as the -// associated commands and reload instructions. -type TemplateConfig struct { - // Backup determines if this template should retain a backup. The default - // value is false. - Backup *bool `mapstructure:"backup"` - - // Command is the arbitrary command to execute after a template has - // successfully rendered. This is DEPRECATED. Use Exec instead. - Command commandList `mapstructure:"command"` - - // CommandTimeout is the amount of time to wait for the command to finish - // before force-killing it. This is DEPRECATED. Use Exec instead. - CommandTimeout *time.Duration `mapstructure:"command_timeout"` - - // Contents are the raw template contents to evaluate. Either this or Source - // must be specified, but not both. - Contents *string `mapstructure:"contents"` - - // CreateDestDirs tells Consul Template to create the parent directories of - // the destination path if they do not exist. The default value is true. - CreateDestDirs *bool `mapstructure:"create_dest_dirs"` - - // Destination is the location on disk where the template should be rendered. 
- // This is required unless running in debug/dry mode. - Destination *string `mapstructure:"destination"` - - // ErrMissingKey is used to control how the template behaves when attempting - // to index a struct or map key that does not exist. - ErrMissingKey *bool `mapstructure:"error_on_missing_key"` - - // ErrFatal determines whether template errors should cause the process to - // exit, or just log and continue. - ErrFatal *bool `mapstructure:"error_fatal"` - - // Exec is the configuration for the command to run when the template renders - // successfully. - Exec *ExecConfig `mapstructure:"exec"` - - // Perms are the file system permissions to use when creating the file on - // disk. This is useful for when files contain sensitive information, such as - // secrets from Vault. - Perms *os.FileMode `mapstructure:"perms"` - - // User is the username or uid that will be set when creating the file on disk. - // Useful when simply setting Perms is not enough. - // - // Platform dependent: this doesn't work on Windows but it fails gracefully - // with a warning - User *string `mapstructure:"user"` - - // Uid is equivalent to User when it's a valid int and it's defined for backward compatibility with v0.28.0 - Uid *int `mapstructure:"uid"` - - // Group is the group name or gid that will be set when creating the file on disk. - // Useful when simply setting Perms is not enough. - // - // Platform dependent: this doesn't work on Windows but it fails gracefully - // with a warning - Group *string `mapstructure:"group"` - - // Gid is equivalent to Group when it's a valid int and it's defined for backward compatibility with v0.28.0 - Gid *int `mapstructure:"gid"` - - // Source is the path on disk to the template contents to evaluate. Either - // this or Contents should be specified, but not both. - Source *string `mapstructure:"source"` - - // Wait configures per-template quiescence timers. - Wait *WaitConfig `mapstructure:"wait"` - - // LeftDelim and RightDelim are optional configurations to control what - // delimiter is utilized when parsing the template. - LeftDelim *string `mapstructure:"left_delimiter"` - RightDelim *string `mapstructure:"right_delimiter"` - - // FunctionDenylist is a list of functions that this template is not - // permitted to run. - FunctionDenylist []string `mapstructure:"function_denylist"` - - // FunctionDenylistDeprecated is the backward compatible option for - // FunctionDenylist for configuration supported by v0.25.0 and older. This - // should not be used directly, use FunctionDenylist instead. Values from - // this are combined to FunctionDenylist in Finalize(). - FunctionDenylistDeprecated []string `mapstructure:"function_blacklist" json:"-"` - - // SandboxPath adds a prefix to any path provided to the `file` function - // and causes an error if a relative path tries to traverse outside that - // prefix. - SandboxPath *string `mapstructure:"sandbox_path"` -} - -// DefaultTemplateConfig returns a configuration that is populated with the -// default values. -func DefaultTemplateConfig() *TemplateConfig { - return &TemplateConfig{ - Exec: DefaultExecConfig(), - Wait: DefaultWaitConfig(), - } -} - -// Copy returns a deep copy of this configuration. 
-func (c *TemplateConfig) Copy() *TemplateConfig { - if c == nil { - return nil - } - - var o TemplateConfig - - o.Backup = c.Backup - - o.Command = c.Command - - o.CommandTimeout = c.CommandTimeout - - o.Contents = c.Contents - - o.CreateDestDirs = c.CreateDestDirs - - o.Destination = c.Destination - - o.ErrMissingKey = c.ErrMissingKey - - o.ErrFatal = c.ErrFatal - - if c.Exec != nil { - o.Exec = c.Exec.Copy() - } - - o.Perms = c.Perms - - o.Source = c.Source - - o.User = c.User - o.Group = c.Group - - o.Uid = c.Uid - o.Gid = c.Gid - - if c.Wait != nil { - o.Wait = c.Wait.Copy() - } - - o.LeftDelim = c.LeftDelim - o.RightDelim = c.RightDelim - - for _, fun := range c.FunctionDenylist { - o.FunctionDenylist = append(o.FunctionDenylist, fun) - } - - for _, fun := range c.FunctionDenylistDeprecated { - o.FunctionDenylistDeprecated = append(o.FunctionDenylistDeprecated, fun) - } - - o.SandboxPath = c.SandboxPath - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *TemplateConfig) Merge(o *TemplateConfig) *TemplateConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Backup != nil { - r.Backup = o.Backup - } - - if o.Command != nil { - r.Command = o.Command - } - - if o.CommandTimeout != nil { - r.CommandTimeout = o.CommandTimeout - } - - if o.Contents != nil { - r.Contents = o.Contents - } - - if o.CreateDestDirs != nil { - r.CreateDestDirs = o.CreateDestDirs - } - - if o.Destination != nil { - r.Destination = o.Destination - } - - if o.ErrMissingKey != nil { - r.ErrMissingKey = o.ErrMissingKey - } - - if o.ErrFatal != nil { - r.ErrFatal = o.ErrFatal - } - - if o.Exec != nil { - r.Exec = r.Exec.Merge(o.Exec) - } - - if o.Perms != nil { - r.Perms = o.Perms - } - - if o.Source != nil { - r.Source = o.Source - } - - if o.User != nil { - r.User = o.User - } - if o.Group != nil { - r.Group = o.Group - } - - if o.Uid != nil { - r.Uid = o.Uid - } - - if o.Gid != nil { - r.Gid = o.Gid - } - - if o.Wait != nil { - r.Wait = r.Wait.Merge(o.Wait) - } - - if o.LeftDelim != nil { - r.LeftDelim = o.LeftDelim - } - - if o.RightDelim != nil { - r.RightDelim = o.RightDelim - } - - for _, fun := range o.FunctionDenylist { - r.FunctionDenylist = append(r.FunctionDenylist, fun) - } - - for _, fun := range o.FunctionDenylistDeprecated { - r.FunctionDenylistDeprecated = append(r.FunctionDenylistDeprecated, fun) - } - - if o.SandboxPath != nil { - r.SandboxPath = o.SandboxPath - } - - return r -} - -// Finalize ensures the configuration has no nil pointers and sets default -// values. 
-func (c *TemplateConfig) Finalize() { - if c.Backup == nil { - c.Backup = Bool(false) - } - - if c.Command == nil { - c.Command = []string{} - } - - if c.CommandTimeout == nil { - c.CommandTimeout = TimeDuration(DefaultTemplateCommandTimeout) - } - - if c.Contents == nil { - c.Contents = String("") - } - - if c.CreateDestDirs == nil { - c.CreateDestDirs = Bool(true) - } - - if c.Destination == nil { - c.Destination = String("") - } - - if c.ErrMissingKey == nil { - c.ErrMissingKey = Bool(false) - } - - if c.ErrFatal == nil { - c.ErrFatal = Bool(true) - } - - // Backwards compatibility for uid - if c.User == nil && c.Uid != nil { - uStr := strconv.Itoa(*c.Uid) - c.User = &uStr - } - - // Backwards compatibility for gid - if c.Group == nil && c.Gid != nil { - gStr := strconv.Itoa(*c.Gid) - c.Group = &gStr - } - - if c.Exec == nil { - c.Exec = DefaultExecConfig() - } - - // Backwards compat for specifying command directly - if c.Exec.Command == nil && c.Command != nil { - c.Exec.Command = c.Command - } - // backwards compat with command_timeout and default support for exec.timeout - switch { - case c.Exec.Timeout == nil && c.CommandTimeout != nil: - c.Exec.Timeout = c.CommandTimeout - case c.Exec.Timeout == nil: - c.Exec.Timeout = TimeDuration(DefaultTemplateCommandTimeout) - } - c.Exec.Finalize() - - if c.Perms == nil { - c.Perms = FileMode(0) - } - - if c.Source == nil { - c.Source = String("") - } - - if c.Wait == nil { - c.Wait = DefaultWaitConfig() - } - c.Wait.Finalize() - - if c.LeftDelim == nil { - c.LeftDelim = String("") - } - - if c.RightDelim == nil { - c.RightDelim = String("") - } - - if c.SandboxPath == nil { - c.SandboxPath = String("") - } - - if c.FunctionDenylist == nil && c.FunctionDenylistDeprecated == nil { - c.FunctionDenylist = []string{} - c.FunctionDenylistDeprecated = []string{} - } else { - c.FunctionDenylist = combineLists(c.FunctionDenylist, c.FunctionDenylistDeprecated) - } -} - -// GoString defines the printable version of this struct. -func (c *TemplateConfig) GoString() string { - if c == nil { - return "(*TemplateConfig)(nil)" - } - - return fmt.Sprintf("&TemplateConfig{"+ - "Backup:%s, "+ - "Command:%s, "+ - "CommandTimeout:%s, "+ - "Contents:%s, "+ - "CreateDestDirs:%s, "+ - "Destination:%s, "+ - "ErrMissingKey:%s, "+ - "ErrFatal:%s, "+ - "Exec:%#v, "+ - "Perms:%s, "+ - "Source:%s, "+ - "Wait:%#v, "+ - "LeftDelim:%s, "+ - "RightDelim:%s, "+ - "FunctionDenylist:%s, "+ - "SandboxPath:%s"+ - "}", - BoolGoString(c.Backup), - c.Command, - TimeDurationGoString(c.CommandTimeout), - StringGoString(c.Contents), - BoolGoString(c.CreateDestDirs), - StringGoString(c.Destination), - BoolGoString(c.ErrMissingKey), - BoolGoString(c.ErrFatal), - c.Exec, - FileModeGoString(c.Perms), - StringGoString(c.Source), - c.Wait, - StringGoString(c.LeftDelim), - StringGoString(c.RightDelim), - combineLists(c.FunctionDenylist, c.FunctionDenylistDeprecated), - StringGoString(c.SandboxPath), - ) -} - -// Display is the human-friendly form of this configuration. It tries to -// describe this template in as much detail as possible in a single line, so -// log consumers can uniquely identify it. 
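Finalize above also carries the backward-compatibility shims: a numeric uid/gid is copied into the string User/Group fields, a top-level command and command_timeout are promoted into the Exec block, and deprecated function_blacklist entries are folded into FunctionDenylist. A sketch of the command and uid promotion:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	// Old-style template stanza: command and uid specified directly.
	t := &config.TemplateConfig{
		Command:        []string{"service nginx reload"},
		CommandTimeout: config.TimeDuration(10 * time.Second),
		Uid:            config.Int(1000),
	}
	t.Finalize()

	// The deprecated fields are mirrored onto their replacements.
	fmt.Println(t.Exec.Command)                         // [service nginx reload]
	fmt.Println(config.TimeDurationVal(t.Exec.Timeout)) // 10s
	fmt.Println(config.StringVal(t.User))               // "1000"
}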
-func (c *TemplateConfig) Display() string { - if c == nil { - return "" - } - - source := c.Source - if StringPresent(c.Contents) { - source = String("(dynamic)") - } - - return fmt.Sprintf("%q => %q", - StringVal(source), - StringVal(c.Destination), - ) -} - -// TemplateConfigs is a collection of TemplateConfigs -type TemplateConfigs []*TemplateConfig - -// DefaultTemplateConfigs returns a configuration that is populated with the -// default values. -func DefaultTemplateConfigs() *TemplateConfigs { - return &TemplateConfigs{} -} - -// Copy returns a deep copy of this configuration. -func (c *TemplateConfigs) Copy() *TemplateConfigs { - o := make(TemplateConfigs, len(*c)) - for i, t := range *c { - o[i] = t.Copy() - } - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *TemplateConfigs) Merge(o *TemplateConfigs) *TemplateConfigs { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - *r = append(*r, *o...) - - return r -} - -// Finalize ensures the configuration has no nil pointers and sets default -// values. -func (c *TemplateConfigs) Finalize() { - if c == nil { - *c = *DefaultTemplateConfigs() - } - - for _, t := range *c { - t.Finalize() - } -} - -// GoString defines the printable version of this struct. -func (c *TemplateConfigs) GoString() string { - if c == nil { - return "(*TemplateConfigs)(nil)" - } - - s := make([]string, len(*c)) - for i, t := range *c { - s[i] = t.GoString() - } - - return "{" + strings.Join(s, ", ") + "}" -} - -// ParseTemplateConfig parses a string in the form source:destination:command -// into a TemplateConfig. -func ParseTemplateConfig(s string) (*TemplateConfig, error) { - if len(strings.TrimSpace(s)) < 1 { - return nil, ErrTemplateStringEmpty - } - - var source, destination, command string - parts := configTemplateRe.FindAllString(s, -1) - - switch len(parts) { - case 1: - source = parts[0] - case 2: - source, destination = parts[0], parts[1] - case 3: - source, destination, command = parts[0], parts[1], parts[2] - default: - source, destination = parts[0], parts[1] - command = strings.Join(parts[2:], ":") - } - - var sourcePtr, destinationPtr *string - var commandL commandList - if source != "" { - sourcePtr = String(source) - } - if destination != "" { - destinationPtr = String(destination) - } - if command != "" { - commandL = []string{command} - } - - return &TemplateConfig{ - Source: sourcePtr, - Destination: destinationPtr, - Command: commandL, - }, nil -} diff --git a/vendor/github.com/hashicorp/consul-template/config/transport.go b/vendor/github.com/hashicorp/consul-template/config/transport.go deleted file mode 100644 index 420bdf1e..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/transport.go +++ /dev/null @@ -1,198 +0,0 @@ -package config - -import ( - "fmt" - "runtime" - "time" - - "github.com/hashicorp/consul-template/dependency" -) - -const ( - // DefaultDialKeepAlive is the default amount of time to keep alive - // connections. - DefaultDialKeepAlive = 30 * time.Second - - // DefaultDialTimeout is the amount of time to attempt to dial before timing - // out. - DefaultDialTimeout = 30 * time.Second - - // DefaultIdleConnTimeout is the default connection timeout for idle - // connections. 
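ParseTemplateConfig above implements the legacy CLI shorthand source:destination:command; configTemplateRe tolerates Windows drive letters such as C:, and any additional colons are joined back into the command. For example:

package main

import (
	"fmt"

	"github.com/hashicorp/consul-template/config"
)

func main() {
	t, err := config.ParseTemplateConfig("/tmp/in.ctmpl:/tmp/out.conf:service nginx reload")
	if err != nil {
		panic(err)
	}
	fmt.Println(config.StringVal(t.Source))      // /tmp/in.ctmpl
	fmt.Println(config.StringVal(t.Destination)) // /tmp/out.conf
	fmt.Println(t.Command)                       // [service nginx reload]
}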
- DefaultIdleConnTimeout = 90 * time.Second - - // DefaultMaxIdleConns is the default number of maximum idle connections. - DefaultMaxIdleConns = 100 - - // DefaultTLSHandshakeTimeout is the amount of time to negotiate the TLS - // handshake. - DefaultTLSHandshakeTimeout = 10 * time.Second -) - -// DefaultMaxIdleConnsPerHost is the default number of idle connections to use -// per host. -var DefaultMaxIdleConnsPerHost = runtime.GOMAXPROCS(0) + 1 - -// TransportConfig is the configuration to tune low-level APIs for the -// interactions on the wire. -type TransportConfig struct { - // CustomDialer overrides the default net.Dial with a custom dialer. This is - // useful for instance with Vault Agent Templating to direct Consul Template - // requests through an internal cache. - CustomDialer dependency.TransportDialer `mapstructure:"-"` - - // DialKeepAlive is the amount of time for keep-alives. - DialKeepAlive *time.Duration `mapstructure:"dial_keep_alive"` - - // DialTimeout is the amount of time to wait to establish a connection. - DialTimeout *time.Duration `mapstructure:"dial_timeout"` - - // DisableKeepAlives determines if keep-alives should be used. Disabling this - // significantly decreases performance. - DisableKeepAlives *bool `mapstructure:"disable_keep_alives"` - - // IdleConnTimeout is the timeout for idle connections. - IdleConnTimeout *time.Duration `mapstructure:"idle_conn_timeout"` - - // MaxIdleConns is the maximum number of total idle connections. - MaxIdleConns *int `mapstructure:"max_idle_conns"` - - // MaxIdleConnsPerHost is the maximum number of idle connections per remote - // host. - MaxIdleConnsPerHost *int `mapstructure:"max_idle_conns_per_host"` - - // TLSHandshakeTimeout is the amount of time to wait to complete the TLS - // handshake. - TLSHandshakeTimeout *time.Duration `mapstructure:"tls_handshake_timeout"` -} - -// DefaultTransportConfig returns a configuration that is populated with the -// default values. -func DefaultTransportConfig() *TransportConfig { - return &TransportConfig{} -} - -// Copy returns a deep copy of this configuration. -func (c *TransportConfig) Copy() *TransportConfig { - if c == nil { - return nil - } - - var o TransportConfig - - o.CustomDialer = c.CustomDialer - o.DialKeepAlive = c.DialKeepAlive - o.DialTimeout = c.DialTimeout - o.DisableKeepAlives = c.DisableKeepAlives - o.IdleConnTimeout = c.IdleConnTimeout - o.MaxIdleConns = c.MaxIdleConns - o.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost - o.TLSHandshakeTimeout = c.TLSHandshakeTimeout - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. 
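These settings correspond closely to fields on net/http's Transport, which is how the finalized values ultimately reach the Consul, Nomad, and Vault clients. The wiring below is an illustrative sketch, not the package's actual client construction:

package main

import (
	"net"
	"net/http"

	"github.com/hashicorp/consul-template/config"
)

// newTransport shows how a finalized TransportConfig maps onto net/http's
// Transport fields; it is a hypothetical helper for illustration.
func newTransport(c *config.TransportConfig) *http.Transport {
	dialer := &net.Dialer{
		Timeout:   config.TimeDurationVal(c.DialTimeout),
		KeepAlive: config.TimeDurationVal(c.DialKeepAlive),
	}
	return &http.Transport{
		DialContext:         dialer.DialContext,
		DisableKeepAlives:   config.BoolVal(c.DisableKeepAlives),
		IdleConnTimeout:     config.TimeDurationVal(c.IdleConnTimeout),
		MaxIdleConns:        config.IntVal(c.MaxIdleConns),
		MaxIdleConnsPerHost: config.IntVal(c.MaxIdleConnsPerHost),
		TLSHandshakeTimeout: config.TimeDurationVal(c.TLSHandshakeTimeout),
	}
}

func main() {
	tc := config.DefaultTransportConfig()
	tc.Finalize()
	_ = &http.Client{Transport: newTransport(tc)}
}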
-func (c *TransportConfig) Merge(o *TransportConfig) *TransportConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.CustomDialer != nil { - r.CustomDialer = o.CustomDialer - } - - if o.DialKeepAlive != nil { - r.DialKeepAlive = o.DialKeepAlive - } - - if o.DialTimeout != nil { - r.DialTimeout = o.DialTimeout - } - - if o.DisableKeepAlives != nil { - r.DisableKeepAlives = o.DisableKeepAlives - } - - if o.IdleConnTimeout != nil { - r.IdleConnTimeout = o.IdleConnTimeout - } - - if o.MaxIdleConns != nil { - r.MaxIdleConns = o.MaxIdleConns - } - - if o.MaxIdleConnsPerHost != nil { - r.MaxIdleConnsPerHost = o.MaxIdleConnsPerHost - } - - if o.TLSHandshakeTimeout != nil { - r.TLSHandshakeTimeout = o.TLSHandshakeTimeout - } - - return r -} - -// Finalize ensures there no nil pointers. -func (c *TransportConfig) Finalize() { - if c.DialKeepAlive == nil { - c.DialKeepAlive = TimeDuration(DefaultDialKeepAlive) - } - - if c.DialTimeout == nil { - c.DialTimeout = TimeDuration(DefaultDialTimeout) - } - - if c.DisableKeepAlives == nil { - c.DisableKeepAlives = Bool(false) - } - - if c.IdleConnTimeout == nil { - c.IdleConnTimeout = TimeDuration(DefaultIdleConnTimeout) - } - - if c.MaxIdleConns == nil { - c.MaxIdleConns = Int(DefaultMaxIdleConns) - } - - if c.MaxIdleConnsPerHost == nil { - c.MaxIdleConnsPerHost = Int(DefaultMaxIdleConnsPerHost) - } - - if c.TLSHandshakeTimeout == nil { - c.TLSHandshakeTimeout = TimeDuration(DefaultTLSHandshakeTimeout) - } -} - -// GoString defines the printable version of this struct. -func (c *TransportConfig) GoString() string { - if c == nil { - return "(*TransportConfig)(nil)" - } - - return fmt.Sprintf("&TransportConfig{"+ - "DialKeepAlive:%s, "+ - "DialTimeout:%s, "+ - "DisableKeepAlives:%t, "+ - "MaxIdleConnsPerHost:%d, "+ - "TLSHandshakeTimeout:%s"+ - "}", - TimeDurationVal(c.DialKeepAlive), - TimeDurationVal(c.DialTimeout), - BoolVal(c.DisableKeepAlives), - IntVal(c.MaxIdleConnsPerHost), - TimeDurationVal(c.TLSHandshakeTimeout), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/vault.go b/vendor/github.com/hashicorp/consul-template/config/vault.go deleted file mode 100644 index 7c7fbb99..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/vault.go +++ /dev/null @@ -1,435 +0,0 @@ -package config - -import ( - "fmt" - "time" - - "github.com/hashicorp/vault/api" -) - -const ( - // XXX Change use to api.EnvVaultSkipVerify once we've updated vendored - // vault to version 1.1.0 or newer. - EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" - - // DefaultVaultRenewToken is the default value for if the Vault token should - // be renewed. - DefaultVaultRenewToken = true - - // DefaultVaultUnwrapToken is the default value for if the Vault token should - // be unwrapped. - DefaultVaultUnwrapToken = false - - // DefaultVaultRetryBase is the default value for the base time to use for - // exponential backoff. - DefaultVaultRetryBase = 250 * time.Millisecond - - // DefaultVaultRetryMaxAttempts is the default maximum number of attempts to - // retry before quitting. - DefaultVaultRetryMaxAttempts = 5 - - // DefaultVaultLeaseDuration is the default lease duration in seconds. 
- DefaultVaultLeaseDuration = 5 * time.Minute - - // DefaultLeaseRenewalThreshold is the default fraction of a non-renewable - // lease to wait for before refreshing - DefaultLeaseRenewalThreshold = .90 - - // DefaultK8SServiceAccountTokenPath is a default path to a file - // with service token for the k8s auth method. - DefaultK8SServiceAccountTokenPath = "/run/secrets/kubernetes.io/serviceaccount/token" - - // DefaultK8SServiceMountPath is a default value of the k8s auth method - // login path. - DefaultK8SServiceMountPath = "kubernetes" -) - -// VaultConfig is the configuration for connecting to a vault server. -type VaultConfig struct { - // Address is the URI to the Vault server. - Address *string `mapstructure:"address"` - - // Enabled controls whether the Vault integration is active. - Enabled *bool `mapstructure:"enabled"` - - // Namespace is the Vault namespace to use for reading/writing secrets. This can - // also be set via the VAULT_NAMESPACE environment variable. - Namespace *string `mapstructure:"namespace"` - - // RenewToken renews the Vault token. - RenewToken *bool `mapstructure:"renew_token"` - - // Retry is the configuration for specifying how to behave on failure. - Retry *RetryConfig `mapstructure:"retry"` - - // SSL indicates we should use a secure connection while talking to Vault. - SSL *SSLConfig `mapstructure:"ssl"` - - // Token is the Vault token to communicate with for requests. It may be - // a wrapped token or a real token. This can also be set via the VAULT_TOKEN - // environment variable, or via the VaultAgentTokenFile. - Token *string `mapstructure:"token" json:"-"` - - // VaultAgentTokenFile is the path of file that contains a Vault Agent token. - // If vault_agent_token_file is specified: - // - Consul Template will not try to renew the Vault token. - // - Consul Template will periodically stat the file and update the token if it has - // changed. - VaultAgentTokenFile *string `mapstructure:"vault_agent_token_file" json:"-"` - - // Transport configures the low-level network connection details. - Transport *TransportConfig `mapstructure:"transport"` - - // UnwrapToken unwraps the provided Vault token as a wrapped token. - UnwrapToken *bool `mapstructure:"unwrap_token"` - - // DefaultLeaseDuration configures the default lease duration when not explicitly - // set by vault - DefaultLeaseDuration *time.Duration `mapstructure:"default_lease_duration"` - - // LeaseRenewalThreshold configues how long Consul Template should wait for to - // refresh dynamic, non-renewable leases, measured as a fraction of the lease - // duration. - LeaseRenewalThreshold *float64 `mapstructure:"lease_renewal_threshold"` - - // If Token is empty and K8SAuthRoleName is set, it means to use - // k8s vault auth method. - // - // The kubernetes auth method can be used to authenticate with Vault - // using a Kubernetes Service Account Token. This method of - // authentication makes it easy to introduce a Vault token into - // a Kubernetes Pod. - // - // This can also be set via the VAULT_K8S_AUTH_ROLE_NAME. - K8SAuthRoleName *string `mapstructure:"k8s_auth_role_name"` - // K8SServiceAccountTokenPath is the path of file that contains - // a K8SServiceAccountToken. It will be ignored if K8SServiceAccountToken - // is set. - // - // Default value is "/run/secrets/kubernetes.io/serviceaccount/token". - // - // This can also be set via the VAULT_K8S_SERVICE_ACCOUNT_TOKEN_PATH. 
- K8SServiceAccountTokenPath *string `mapstructure:"k8s_service_account_token_path"` - // Value of an account token for k8s auth method. - // - // This can also be set via the VAULT_K8S_SERVICE_ACCOUNT_TOKEN. - K8SServiceAccountToken *string `mapstructure:"k8s_service_account_token"` - // K8SServiceMountPath is a part of k8s login path, by default the value is - // "kubernetes". In this case a full path will be "auth/kubernetes/login". - // - // This can also be set via the VAULT_K8S_SERVICE_MOUNT_PATH. - K8SServiceMountPath *string `mapstructure:"k8s_service_mount_path"` -} - -// DefaultVaultConfig returns a configuration that is populated with the -// default values. -func DefaultVaultConfig() *VaultConfig { - v := &VaultConfig{ - Retry: DefaultRetryConfig(), - SSL: DefaultSSLConfig(), - Transport: DefaultTransportConfig(), - } - - // Force SSL when communicating with Vault. - v.SSL.Enabled = Bool(true) - - return v -} - -// Copy returns a deep copy of this configuration. -func (c *VaultConfig) Copy() *VaultConfig { - if c == nil { - return nil - } - - var o VaultConfig - o.Address = c.Address - - o.Enabled = c.Enabled - - o.Namespace = c.Namespace - - o.RenewToken = c.RenewToken - - if c.Retry != nil { - o.Retry = c.Retry.Copy() - } - - if c.SSL != nil { - o.SSL = c.SSL.Copy() - } - - o.Token = c.Token - - o.VaultAgentTokenFile = c.VaultAgentTokenFile - - if c.Transport != nil { - o.Transport = c.Transport.Copy() - } - - o.UnwrapToken = c.UnwrapToken - - o.DefaultLeaseDuration = c.DefaultLeaseDuration - o.LeaseRenewalThreshold = c.LeaseRenewalThreshold - - o.K8SAuthRoleName = c.K8SAuthRoleName - o.K8SServiceAccountToken = c.K8SServiceAccountToken - o.K8SServiceAccountTokenPath = c.K8SServiceAccountTokenPath - o.K8SServiceMountPath = c.K8SServiceMountPath - - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *VaultConfig) Merge(o *VaultConfig) *VaultConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Address != nil { - r.Address = o.Address - } - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Namespace != nil { - r.Namespace = o.Namespace - } - - if o.RenewToken != nil { - r.RenewToken = o.RenewToken - } - - if o.Retry != nil { - r.Retry = r.Retry.Merge(o.Retry) - } - - if o.SSL != nil { - r.SSL = r.SSL.Merge(o.SSL) - } - - if o.Token != nil { - r.Token = o.Token - } - - if o.VaultAgentTokenFile != nil { - r.VaultAgentTokenFile = o.VaultAgentTokenFile - } - - if o.Transport != nil { - r.Transport = r.Transport.Merge(o.Transport) - } - - if o.UnwrapToken != nil { - r.UnwrapToken = o.UnwrapToken - } - - if o.DefaultLeaseDuration != nil { - r.DefaultLeaseDuration = o.DefaultLeaseDuration - } - - if o.LeaseRenewalThreshold != nil { - r.LeaseRenewalThreshold = o.LeaseRenewalThreshold - } - - if o.K8SAuthRoleName != nil { - r.K8SAuthRoleName = o.K8SAuthRoleName - } - - if o.K8SServiceAccountToken != nil { - r.K8SServiceAccountToken = o.K8SServiceAccountToken - } - - if o.K8SServiceAccountTokenPath != nil { - r.K8SServiceAccountTokenPath = o.K8SServiceAccountTokenPath - } - - if o.K8SServiceMountPath != nil { - r.K8SServiceMountPath = o.K8SServiceMountPath - } - - return r -} - -// Finalize ensures there no nil pointers. 
-func (c *VaultConfig) Finalize() { - if c.Address == nil { - c.Address = stringFromEnv([]string{ - api.EnvVaultAddress, - }, "") - } - - if c.Namespace == nil { - c.Namespace = stringFromEnv([]string{"VAULT_NAMESPACE"}, "") - } - - if c.Retry == nil { - c.Retry = DefaultRetryConfig() - } - c.Retry.Finalize() - - // Vault has custom SSL settings - if c.SSL == nil { - c.SSL = DefaultSSLConfig() - } - if c.SSL.Enabled == nil { - c.SSL.Enabled = Bool(true) - } - if c.SSL.CaCert == nil { - c.SSL.CaCert = stringFromEnv([]string{api.EnvVaultCACert}, "") - } - if c.SSL.CaPath == nil { - c.SSL.CaPath = stringFromEnv([]string{api.EnvVaultCAPath}, "") - } - if c.SSL.Cert == nil { - c.SSL.Cert = stringFromEnv([]string{api.EnvVaultClientCert}, "") - } - if c.SSL.Key == nil { - c.SSL.Key = stringFromEnv([]string{api.EnvVaultClientKey}, "") - } - if c.SSL.ServerName == nil { - c.SSL.ServerName = stringFromEnv([]string{api.EnvVaultTLSServerName}, "") - } - if c.SSL.Verify == nil { - c.SSL.Verify = antiboolFromEnv([]string{ - EnvVaultSkipVerify, api.EnvVaultInsecure, - }, true) - } - c.SSL.Finalize() - - // Order of precedence - // 1. `vault_agent_token_file` configuration value - // 2. `token` configuration value` - // 3. `VAULT_TOKEN` environment variable - if c.Token == nil { - c.Token = stringFromEnv([]string{ - "VAULT_TOKEN", - }, "") - } - - if c.VaultAgentTokenFile == nil { - if StringVal(c.Token) == "" { - if homePath != "" { - c.Token = stringFromFile([]string{ - homePath + "/.vault-token", - }, "") - } - } - } else { - c.Token = stringFromFile([]string{*c.VaultAgentTokenFile}, "") - } - - // must be after c.Token setting, as default depends on that. - if c.RenewToken == nil { - default_renew := DefaultVaultRenewToken - if c.VaultAgentTokenFile != nil { - default_renew = false - } else if StringVal(c.Token) == "" { - default_renew = false - } - c.RenewToken = boolFromEnv([]string{ - "VAULT_RENEW_TOKEN", - }, default_renew) - } - - if c.Transport == nil { - c.Transport = DefaultTransportConfig() - } - c.Transport.Finalize() - - if c.UnwrapToken == nil { - c.UnwrapToken = boolFromEnv([]string{ - "VAULT_UNWRAP_TOKEN", - }, DefaultVaultUnwrapToken) - } - - if c.Enabled == nil { - c.Enabled = Bool(StringPresent(c.Address)) - } - - if c.DefaultLeaseDuration == nil { - c.DefaultLeaseDuration = TimeDuration(DefaultVaultLeaseDuration) - } - - if c.LeaseRenewalThreshold == nil { - c.LeaseRenewalThreshold = Float64(DefaultLeaseRenewalThreshold) - } - - if c.K8SAuthRoleName == nil { - c.K8SAuthRoleName = stringFromEnv([]string{ - "VAULT_K8S_AUTH_ROLE_NAME", - }, "") - } - if c.K8SServiceAccountToken == nil { - c.K8SServiceAccountToken = stringFromEnv([]string{ - "VAULT_K8S_SERVICE_ACCOUNT_TOKEN", - }, "") - } - if c.K8SServiceAccountTokenPath == nil { - c.K8SServiceAccountTokenPath = stringFromEnv([]string{ - "VAULT_K8S_SERVICE_ACCOUNT_TOKEN_PATH", - }, DefaultK8SServiceAccountTokenPath) - } - if c.K8SServiceMountPath == nil { - c.K8SServiceMountPath = stringFromEnv([]string{ - "VAULT_K8S_SERVICE_MOUNT_PATH", - }, DefaultK8SServiceMountPath) - } -} - -// GoString defines the printable version of this struct. 
-func (c *VaultConfig) GoString() string { - if c == nil { - return "(*VaultConfig)(nil)" - } - - return fmt.Sprintf("&VaultConfig{"+ - "Address:%s, "+ - "Enabled:%s, "+ - "Namespace:%s,"+ - "RenewToken:%s, "+ - "Retry:%#v, "+ - "SSL:%#v, "+ - "Token:%t, "+ - "VaultAgentTokenFile:%t, "+ - "Transport:%#v, "+ - "UnwrapToken:%s, "+ - "DefaultLeaseDuration:%s, "+ - "LeaseRenewalThreshold:%s, "+ - "K8SAuthRoleName:%s, "+ - "K8SServiceAccountToken:%s, "+ - "K8SServiceAccountTokenPath:%s, "+ - "K8SServiceMountPath:%s, "+ - "}", - StringGoString(c.Address), - BoolGoString(c.Enabled), - StringGoString(c.Namespace), - BoolGoString(c.RenewToken), - c.Retry, - c.SSL, - StringPresent(c.Token), - StringPresent(c.VaultAgentTokenFile), - c.Transport, - BoolGoString(c.UnwrapToken), - TimeDurationGoString(c.DefaultLeaseDuration), - FloatGoString(c.LeaseRenewalThreshold), - StringGoString(c.K8SAuthRoleName), - StringGoString(c.K8SServiceAccountToken), - StringGoString(c.K8SServiceAccountTokenPath), - StringGoString(c.K8SServiceMountPath), - ) -} diff --git a/vendor/github.com/hashicorp/consul-template/config/wait.go b/vendor/github.com/hashicorp/consul-template/config/wait.go deleted file mode 100644 index 8e3d56c1..00000000 --- a/vendor/github.com/hashicorp/consul-template/config/wait.go +++ /dev/null @@ -1,191 +0,0 @@ -package config - -import ( - "errors" - "fmt" - "strings" - "time" -) - -var ( - // ErrWaitStringEmpty is the error returned when wait is specified as an empty - // string. - ErrWaitStringEmpty = errors.New("wait: cannot be empty") - - // ErrWaitInvalidFormat is the error returned when the wait is specified - // incorrectly. - ErrWaitInvalidFormat = errors.New("wait: invalid format") - - // ErrWaitNegative is the error returned with the wait is negative. - ErrWaitNegative = errors.New("wait: cannot be negative") - - // ErrWaitMinLTMax is the error returned with the minimum wait time is not - // less than the maximum wait time. - ErrWaitMinLTMax = errors.New("wait: min must be less than max") -) - -// WaitConfig is the Min/Max duration used by the Watcher -type WaitConfig struct { - // Enabled determines if this wait is enabled. - Enabled *bool `mapstructure:"bool"` - - // Min and Max are the minimum and maximum time, respectively, to wait for - // data changes before rendering a new template to disk. - Min *time.Duration `mapstructure:"min"` - Max *time.Duration `mapstructure:"max"` -} - -// DefaultWaitConfig is the default configuration. -func DefaultWaitConfig() *WaitConfig { - return &WaitConfig{} -} - -// Copy returns a deep copy of this configuration. -func (c *WaitConfig) Copy() *WaitConfig { - if c == nil { - return nil - } - - var o WaitConfig - o.Enabled = c.Enabled - o.Min = c.Min - o.Max = c.Max - return &o -} - -// Merge combines all values in this configuration with the values in the other -// configuration, with values in the other configuration taking precedence. -// Maps and slices are merged, most other values are overwritten. Complex -// structs define their own merge functionality. -func (c *WaitConfig) Merge(o *WaitConfig) *WaitConfig { - if c == nil { - if o == nil { - return nil - } - return o.Copy() - } - - if o == nil { - return c.Copy() - } - - r := c.Copy() - - if o.Enabled != nil { - r.Enabled = o.Enabled - } - - if o.Min != nil { - r.Min = o.Min - } - - if o.Max != nil { - r.Max = o.Max - } - - return r -} - -// Finalize ensures there no nil pointers. 
-func (c *WaitConfig) Finalize() { - if c.Enabled == nil { - c.Enabled = Bool(TimeDurationPresent(c.Min)) - } - - if c.Min == nil { - c.Min = TimeDuration(0 * time.Second) - } - - if c.Max == nil { - c.Max = TimeDuration(4 * *c.Min) - } -} - -// GoString defines the printable version of this struct. -func (c *WaitConfig) GoString() string { - if c == nil { - return "(*WaitConfig)(nil)" - } - - return fmt.Sprintf("&WaitConfig{"+ - "Enabled:%s, "+ - "Min:%s, "+ - "Max:%s"+ - "}", - BoolGoString(c.Enabled), - TimeDurationGoString(c.Min), - TimeDurationGoString(c.Max), - ) -} - -// ParseWaitConfig parses a string of the format `minimum(:maximum)` into a -// WaitConfig. -func ParseWaitConfig(s string) (*WaitConfig, error) { - s = strings.TrimSpace(s) - if len(s) < 1 { - return nil, ErrWaitStringEmpty - } - - parts := strings.Split(s, ":") - - var min, max time.Duration - var err error - - switch len(parts) { - case 1: - min, err = time.ParseDuration(strings.TrimSpace(parts[0])) - if err != nil { - return nil, err - } - - max = 4 * min - case 2: - min, err = time.ParseDuration(strings.TrimSpace(parts[0])) - if err != nil { - return nil, err - } - - max, err = time.ParseDuration(strings.TrimSpace(parts[1])) - if err != nil { - return nil, err - } - default: - return nil, ErrWaitInvalidFormat - } - - if min < 0 || max < 0 { - return nil, ErrWaitNegative - } - - if max < min { - return nil, ErrWaitMinLTMax - } - - var c WaitConfig - c.Min = TimeDuration(min) - c.Max = TimeDuration(max) - - return &c, nil -} - -// WaitVar implements the Flag.Value interface and allows the user to specify -// a watch interval using Go's flag parsing library. -type WaitVar WaitConfig - -// Set sets the value in the format min[:max] for a wait timer. -func (w *WaitVar) Set(value string) error { - wait, err := ParseWaitConfig(value) - if err != nil { - return err - } - - w.Min = wait.Min - w.Max = wait.Max - - return nil -} - -// String returns the string format for this wait variable -func (w *WaitVar) String() string { - return fmt.Sprintf("%s:%s", w.Min, w.Max) -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go deleted file mode 100644 index a78d33cf..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_datacenters.go +++ /dev/null @@ -1,112 +0,0 @@ -package dependency - -import ( - "log" - "net/url" - "sort" - "time" - - "github.com/hashicorp/consul/api" - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*CatalogDatacentersQuery)(nil) - - // CatalogDatacentersQuerySleepTime is the amount of time to sleep between - // queries, since the endpoint does not support blocking queries. - CatalogDatacentersQuerySleepTime = 15 * time.Second -) - -// CatalogDatacentersQuery is the dependency to query all datacenters -type CatalogDatacentersQuery struct { - ignoreFailing bool - - stopCh chan struct{} -} - -// NewCatalogDatacentersQuery creates a new datacenter dependency. 
-func NewCatalogDatacentersQuery(ignoreFailing bool) (*CatalogDatacentersQuery, error) { - return &CatalogDatacentersQuery{ - ignoreFailing: ignoreFailing, - stopCh: make(chan struct{}, 1), - }, nil -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of strings representing the datacenters -func (d *CatalogDatacentersQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - opts = opts.Merge(&QueryOptions{}) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/catalog/datacenters", - RawQuery: opts.String(), - }) - - // This is pretty ghetto, but the datacenters endpoint does not support - // blocking queries, so we are going to "fake it until we make it". When we - // first query, the LastIndex will be "0", meaning we should immediately - // return data, but future calls will include a LastIndex. If we have a - // LastIndex in the query metadata, sleep for 15 seconds before asking Consul - // again. - // - // This is probably okay given the frequency in which datacenters actually - // change, but is technically not edge-triggering. - if opts.WaitIndex != 0 { - log.Printf("[TRACE] %s: long polling for %s", d, CatalogDatacentersQuerySleepTime) - - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case <-time.After(CatalogDatacentersQuerySleepTime): - } - } - - result, err := clients.Consul().Catalog().Datacenters() - if err != nil { - return nil, nil, errors.Wrapf(err, d.String()) - } - - // If the user opted in for skipping "down" datacenters, figure out which - // datacenters are down. - if d.ignoreFailing { - dcs := make([]string, 0, len(result)) - for _, dc := range result { - if _, _, err := clients.Consul().Catalog().Services(&api.QueryOptions{ - Datacenter: dc, - AllowStale: false, - RequireConsistent: true, - }); err == nil { - dcs = append(dcs, dc) - } - } - result = dcs - } - - log.Printf("[TRACE] %s: returned %d results", d, len(result)) - - sort.Strings(result) - - return respWithMetadata(result) -} - -// CanShare returns if this dependency is shareable. -func (d *CatalogDatacentersQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *CatalogDatacentersQuery) String() string { - return "catalog.datacenters" -} - -// Stop terminates this dependency's fetch. -func (d *CatalogDatacentersQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *CatalogDatacentersQuery) Type() Type { - return TypeConsul -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go deleted file mode 100644 index 11ec3af8..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go +++ /dev/null @@ -1,181 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "sort" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*CatalogNodeQuery)(nil) - - // CatalogNodeQueryRe is the regular expression to use. - CatalogNodeQueryRe = regexp.MustCompile(`\A` + nodeNameRe + dcRe + `\z`) -) - -func init() { - gob.Register([]*CatalogNode{}) - gob.Register([]*CatalogNodeService{}) -} - -// CatalogNodeQuery represents a single node from the Consul catalog. -type CatalogNodeQuery struct { - stopCh chan struct{} - - dc string - name string -} - -// CatalogNode is a wrapper around the node and its services. 
-type CatalogNode struct { - Node *Node - Services []*CatalogNodeService -} - -// CatalogNodeService is a service on a single node. -type CatalogNodeService struct { - ID string - Service string - Tags ServiceTags - Meta map[string]string - Port int - Address string - EnableTagOverride bool -} - -// NewCatalogNodeQuery parses the given string into a dependency. If the name is -// empty then the name of the local agent is used. -func NewCatalogNodeQuery(s string) (*CatalogNodeQuery, error) { - if s != "" && !CatalogNodeQueryRe.MatchString(s) { - return nil, fmt.Errorf("catalog.node: invalid format: %q", s) - } - - m := regexpMatch(CatalogNodeQueryRe, s) - return &CatalogNodeQuery{ - dc: m["dc"], - name: m["name"], - stopCh: make(chan struct{}, 1), - }, nil -} - -// Fetch queries the Consul API defined by the given client and returns a -// of CatalogNode object. -func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - }) - - // Grab the name - name := d.name - - if name == "" { - log.Printf("[TRACE] %s: getting local agent name", d) - var err error - name, err = clients.Consul().Agent().NodeName() - if err != nil { - return nil, nil, errors.Wrapf(err, d.String()) - } - } - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/catalog/node/" + name, - RawQuery: opts.String(), - }) - node, qm, err := clients.Consul().Catalog().Node(name, opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned response", d) - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - if node == nil { - log.Printf("[WARN] %s: no node exists with the name %q", d, name) - var node CatalogNode - return &node, rm, nil - } - - services := make([]*CatalogNodeService, 0, len(node.Services)) - for _, v := range node.Services { - services = append(services, &CatalogNodeService{ - ID: v.ID, - Service: v.Service, - Tags: ServiceTags(deepCopyAndSortTags(v.Tags)), - Meta: v.Meta, - Port: v.Port, - Address: v.Address, - EnableTagOverride: v.EnableTagOverride, - }) - } - sort.Stable(ByService(services)) - - detail := &CatalogNode{ - Node: &Node{ - ID: node.Node.ID, - Node: node.Node.Node, - Address: node.Node.Address, - Datacenter: node.Node.Datacenter, - TaggedAddresses: node.Node.TaggedAddresses, - Meta: node.Node.Meta, - }, - Services: services, - } - - return detail, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *CatalogNodeQuery) CanShare() bool { - return false -} - -// String returns the human-friendly version of this dependency. -func (d *CatalogNodeQuery) String() string { - name := d.name - if d.dc != "" { - name = name + "@" + d.dc - } - - if name == "" { - return "catalog.node" - } - return fmt.Sprintf("catalog.node(%s)", name) -} - -// Stop halts the dependency's fetch function. -func (d *CatalogNodeQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *CatalogNodeQuery) Type() Type { - return TypeConsul -} - -// ByService is a sorter of node services by their service name and then ID. 
-type ByService []*CatalogNodeService - -func (s ByService) Len() int { return len(s) } -func (s ByService) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s ByService) Less(i, j int) bool { - if s[i].Service == s[j].Service { - return s[i].ID < s[j].ID - } - return s[i].Service < s[j].Service -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go deleted file mode 100644 index 3c8314ac..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go +++ /dev/null @@ -1,150 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "sort" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*CatalogNodesQuery)(nil) - - // CatalogNodesQueryRe is the regular expression to use. - CatalogNodesQueryRe = regexp.MustCompile(`\A` + dcRe + nearRe + `\z`) -) - -func init() { - gob.Register([]*Node{}) -} - -// Node is a node entry in Consul -type Node struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - Meta map[string]string -} - -// CatalogNodesQuery is the representation of all registered nodes in Consul. -type CatalogNodesQuery struct { - stopCh chan struct{} - - dc string - near string -} - -// NewCatalogNodesQuery parses the given string into a dependency. If the name is -// empty then the name of the local agent is used. -func NewCatalogNodesQuery(s string) (*CatalogNodesQuery, error) { - if !CatalogNodesQueryRe.MatchString(s) { - return nil, fmt.Errorf("catalog.nodes: invalid format: %q", s) - } - - m := regexpMatch(CatalogNodesQueryRe, s) - return &CatalogNodesQuery{ - dc: m["dc"], - near: m["near"], - stopCh: make(chan struct{}, 1), - }, nil -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of Node objects -func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - Near: d.near, - }) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/catalog/nodes", - RawQuery: opts.String(), - }) - n, qm, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d results", d, len(n)) - - nodes := make([]*Node, 0, len(n)) - for _, node := range n { - nodes = append(nodes, &Node{ - ID: node.ID, - Node: node.Node, - Address: node.Address, - Datacenter: node.Datacenter, - TaggedAddresses: node.TaggedAddresses, - Meta: node.Meta, - }) - } - - // Sort unless the user explicitly asked for nearness - if d.near == "" { - sort.Stable(ByNode(nodes)) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return nodes, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *CatalogNodesQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *CatalogNodesQuery) String() string { - name := "" - if d.dc != "" { - name = name + "@" + d.dc - } - if d.near != "" { - name = name + "~" + d.near - } - - if name == "" { - return "catalog.nodes" - } - return fmt.Sprintf("catalog.nodes(%s)", name) -} - -// Stop halts the dependency's fetch function. 
-func (d *CatalogNodesQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *CatalogNodesQuery) Type() Type { - return TypeConsul -} - -// ByNode is a sortable list of nodes by name and then IP address. -type ByNode []*Node - -func (s ByNode) Len() int { return len(s) } -func (s ByNode) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s ByNode) Less(i, j int) bool { - if s[i].Node == s[j].Node { - return s[i].Address < s[j].Address - } - return s[i].Node < s[j].Node -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go deleted file mode 100644 index 8b94a599..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go +++ /dev/null @@ -1,154 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*CatalogServiceQuery)(nil) - - // CatalogServiceQueryRe is the regular expression to use. - CatalogServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + `\z`) -) - -func init() { - gob.Register([]*CatalogSnippet{}) -} - -// CatalogService is a catalog entry in Consul. -type CatalogService struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - NodeMeta map[string]string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags ServiceTags - ServiceMeta map[string]string - ServicePort int -} - -// CatalogServiceQuery is the representation of a requested catalog services -// dependency from inside a template. -type CatalogServiceQuery struct { - stopCh chan struct{} - - dc string - name string - near string - tag string -} - -// NewCatalogServiceQuery parses a string into a CatalogServiceQuery. -func NewCatalogServiceQuery(s string) (*CatalogServiceQuery, error) { - if !CatalogServiceQueryRe.MatchString(s) { - return nil, fmt.Errorf("catalog.service: invalid format: %q", s) - } - - m := regexpMatch(CatalogServiceQueryRe, s) - return &CatalogServiceQuery{ - stopCh: make(chan struct{}, 1), - dc: m["dc"], - name: m["name"], - near: m["near"], - tag: m["tag"], - }, nil -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of CatalogService objects. 
-func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - Near: d.near, - }) - - u := &url.URL{ - Path: "/v1/catalog/service/" + d.name, - RawQuery: opts.String(), - } - if d.tag != "" { - q := u.Query() - q.Set("tag", d.tag) - u.RawQuery = q.Encode() - } - log.Printf("[TRACE] %s: GET %s", d, u) - - entries, qm, err := clients.Consul().Catalog().Service(d.name, d.tag, opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d results", d, len(entries)) - - var list []*CatalogService - for _, s := range entries { - list = append(list, &CatalogService{ - ID: s.ID, - Node: s.Node, - Address: s.Address, - Datacenter: s.Datacenter, - TaggedAddresses: s.TaggedAddresses, - NodeMeta: s.NodeMeta, - ServiceID: s.ServiceID, - ServiceName: s.ServiceName, - ServiceAddress: s.ServiceAddress, - ServiceTags: ServiceTags(deepCopyAndSortTags(s.ServiceTags)), - ServiceMeta: s.ServiceMeta, - ServicePort: s.ServicePort, - }) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return list, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *CatalogServiceQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *CatalogServiceQuery) String() string { - name := d.name - if d.tag != "" { - name = d.tag + "." + name - } - if d.dc != "" { - name = name + "@" + d.dc - } - if d.near != "" { - name = name + "~" + d.near - } - return fmt.Sprintf("catalog.service(%s)", name) -} - -// Stop halts the dependency's fetch function. -func (d *CatalogServiceQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *CatalogServiceQuery) Type() Type { - return TypeConsul -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go deleted file mode 100644 index c967b428..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_services.go +++ /dev/null @@ -1,124 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "sort" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*CatalogServicesQuery)(nil) - - // CatalogServicesQueryRe is the regular expression to use for CatalogNodesQuery. - CatalogServicesQueryRe = regexp.MustCompile(`\A` + dcRe + `\z`) -) - -func init() { - gob.Register([]*CatalogSnippet{}) -} - -// CatalogSnippet is a catalog entry in Consul. -type CatalogSnippet struct { - Name string - Tags ServiceTags -} - -// CatalogServicesQuery is the representation of a requested catalog service -// dependency from inside a template. -type CatalogServicesQuery struct { - stopCh chan struct{} - - dc string -} - -// NewCatalogServicesQuery parses a string of the format @dc. 
-func NewCatalogServicesQuery(s string) (*CatalogServicesQuery, error) { - if !CatalogServicesQueryRe.MatchString(s) { - return nil, fmt.Errorf("catalog.services: invalid format: %q", s) - } - - m := regexpMatch(CatalogServicesQueryRe, s) - return &CatalogServicesQuery{ - stopCh: make(chan struct{}, 1), - dc: m["dc"], - }, nil -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of CatalogService objects. -func (d *CatalogServicesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - }) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/catalog/services", - RawQuery: opts.String(), - }) - - entries, qm, err := clients.Consul().Catalog().Services(opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d results", d, len(entries)) - - var catalogServices []*CatalogSnippet - for name, tags := range entries { - catalogServices = append(catalogServices, &CatalogSnippet{ - Name: name, - Tags: ServiceTags(deepCopyAndSortTags(tags)), - }) - } - - sort.Stable(ByName(catalogServices)) - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return catalogServices, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *CatalogServicesQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *CatalogServicesQuery) String() string { - if d.dc != "" { - return fmt.Sprintf("catalog.services(@%s)", d.dc) - } - return "catalog.services" -} - -// Stop halts the dependency's fetch function. -func (d *CatalogServicesQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *CatalogServicesQuery) Type() Type { - return TypeConsul -} - -// ByName is a sortable slice of CatalogService structs. -type ByName []*CatalogSnippet - -func (s ByName) Len() int { return len(s) } -func (s ByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s ByName) Less(i, j int) bool { return s[i].Name < s[j].Name } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go deleted file mode 100644 index fa5a1691..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go +++ /dev/null @@ -1,556 +0,0 @@ -package dependency - -import ( - "context" - "crypto/tls" - "fmt" - "log" - "net" - "net/http" - "sync" - "time" - - consulapi "github.com/hashicorp/consul/api" - rootcerts "github.com/hashicorp/go-rootcerts" - nomadapi "github.com/hashicorp/nomad/api" - vaultapi "github.com/hashicorp/vault/api" - vaultkubernetesauth "github.com/hashicorp/vault/api/auth/kubernetes" -) - -// ClientSet is a collection of clients that dependencies use to communicate -// with remote services like Consul or Vault. -type ClientSet struct { - sync.RWMutex - - vault *vaultClient - consul *consulClient - nomad *nomadClient -} - -// consulClient is a wrapper around a real Consul API client. -type consulClient struct { - client *consulapi.Client - transport *http.Transport -} - -// vaultClient is a wrapper around a real Vault API client. -type vaultClient struct { - client *vaultapi.Client - httpClient *http.Client -} - -// nomadClient is a wrapper around a real Nomad API client. 
-type nomadClient struct { - client *nomadapi.Client - httpClient *http.Client -} - -// TransportDialer is an interface that allows passing a custom dialer function -// to an HTTP client's transport config -type TransportDialer interface { - // Dial is intended to match https://pkg.go.dev/net#Dialer.Dial - Dial(network, address string) (net.Conn, error) - - // DialContext is intended to match https://pkg.go.dev/net#Dialer.DialContext - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -// CreateConsulClientInput is used as input to the CreateConsulClient function. -type CreateConsulClientInput struct { - Address string - Namespace string - Token string - TokenFile string - AuthEnabled bool - AuthUsername string - AuthPassword string - SSLEnabled bool - SSLVerify bool - SSLCert string - SSLKey string - SSLCACert string - SSLCAPath string - ServerName string - - TransportDialKeepAlive time.Duration - TransportDialTimeout time.Duration - TransportDisableKeepAlives bool - TransportIdleConnTimeout time.Duration - TransportMaxIdleConns int - TransportMaxIdleConnsPerHost int - TransportTLSHandshakeTimeout time.Duration -} - -// CreateVaultClientInput is used as input to the CreateVaultClient function. -type CreateVaultClientInput struct { - Address string - Namespace string - Token string - UnwrapToken bool - SSLEnabled bool - SSLVerify bool - SSLCert string - SSLKey string - SSLCACert string - SSLCAPath string - ServerName string - - K8SAuthRoleName string - K8SServiceAccountTokenPath string - K8SServiceAccountToken string - K8SServiceMountPath string - - TransportCustomDialer TransportDialer - TransportDialKeepAlive time.Duration - TransportDialTimeout time.Duration - TransportDisableKeepAlives bool - TransportIdleConnTimeout time.Duration - TransportMaxIdleConns int - TransportMaxIdleConnsPerHost int - TransportTLSHandshakeTimeout time.Duration -} - -// CreateNomadClientInput is used as input to the CreateNomadClient function. -type CreateNomadClientInput struct { - Address string - Namespace string - Token string - AuthUsername string - AuthPassword string - SSLEnabled bool - SSLVerify bool - SSLCert string - SSLKey string - SSLCACert string - SSLCAPath string - ServerName string - - TransportCustomDialer TransportDialer - TransportDialKeepAlive time.Duration - TransportDialTimeout time.Duration - TransportDisableKeepAlives bool - TransportIdleConnTimeout time.Duration - TransportMaxIdleConns int - TransportMaxIdleConnsPerHost int - TransportTLSHandshakeTimeout time.Duration -} - -// NewClientSet creates a new client set that is ready to accept clients. -func NewClientSet() *ClientSet { - return &ClientSet{} -} - -// CreateConsulClient creates a new Consul API client from the given input. -func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { - consulConfig := consulapi.DefaultConfig() - - if i.Address != "" { - consulConfig.Address = i.Address - } - - if i.Namespace != "" { - consulConfig.Namespace = i.Namespace - } - - if i.Token != "" { - consulConfig.Token = i.Token - } - - if i.TokenFile != "" { - consulConfig.TokenFile = i.TokenFile - } - - if i.AuthEnabled { - consulConfig.HttpAuth = &consulapi.HttpBasicAuth{ - Username: i.AuthUsername, - Password: i.AuthPassword, - } - } - - // This transport will attempt to keep connections open to the Consul server. 
- transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: i.TransportDialTimeout, - KeepAlive: i.TransportDialKeepAlive, - }).Dial, - DisableKeepAlives: i.TransportDisableKeepAlives, - MaxIdleConns: i.TransportMaxIdleConns, - IdleConnTimeout: i.TransportIdleConnTimeout, - MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, - TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, - } - - // Configure SSL - if i.SSLEnabled { - consulConfig.Scheme = "https" - - var tlsConfig tls.Config - - // Custom certificate or certificate and key - if i.SSLCert != "" && i.SSLKey != "" { - cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) - if err != nil { - return fmt.Errorf("client set: consul: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } else if i.SSLCert != "" { - cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) - if err != nil { - return fmt.Errorf("client set: consul: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - // Custom CA certificate - if i.SSLCACert != "" || i.SSLCAPath != "" { - rootConfig := &rootcerts.Config{ - CAFile: i.SSLCACert, - CAPath: i.SSLCAPath, - } - if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { - return fmt.Errorf("client set: consul configuring TLS failed: %s", err) - } - } - - // Construct all the certificates now - tlsConfig.BuildNameToCertificate() - - // SSL verification - if i.ServerName != "" { - tlsConfig.ServerName = i.ServerName - tlsConfig.InsecureSkipVerify = false - } - if !i.SSLVerify { - log.Printf("[WARN] (clients) disabling consul SSL verification") - tlsConfig.InsecureSkipVerify = true - } - - // Save the TLS config on our transport - transport.TLSClientConfig = &tlsConfig - } - - // Setup the new transport - consulConfig.Transport = transport - - // Create the API client - client, err := consulapi.NewClient(consulConfig) - if err != nil { - return fmt.Errorf("client set: consul: %s", err) - } - - // Save the data on ourselves - c.Lock() - c.consul = &consulClient{ - client: client, - transport: transport, - } - c.Unlock() - - return nil -} - -func (c *ClientSet) CreateVaultClient(i *CreateVaultClientInput) error { - vaultConfig := vaultapi.DefaultConfig() - - if i.Address != "" { - vaultConfig.Address = i.Address - } - - // This transport will attempt to keep connections open to the Vault server. 
- var dialer TransportDialer - dialer = &net.Dialer{ - Timeout: i.TransportDialTimeout, - KeepAlive: i.TransportDialKeepAlive, - } - - if i.TransportCustomDialer != nil { - dialer = i.TransportCustomDialer - } - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: dialer.Dial, - DisableKeepAlives: i.TransportDisableKeepAlives, - MaxIdleConns: i.TransportMaxIdleConns, - IdleConnTimeout: i.TransportIdleConnTimeout, - MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, - TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, - } - - // Configure SSL - if i.SSLEnabled { - var tlsConfig tls.Config - - // Custom certificate or certificate and key - if i.SSLCert != "" && i.SSLKey != "" { - cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) - if err != nil { - return fmt.Errorf("client set: vault: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } else if i.SSLCert != "" { - cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) - if err != nil { - return fmt.Errorf("client set: vault: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - // Custom CA certificate - if i.SSLCACert != "" || i.SSLCAPath != "" { - rootConfig := &rootcerts.Config{ - CAFile: i.SSLCACert, - CAPath: i.SSLCAPath, - } - if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { - return fmt.Errorf("client set: vault configuring TLS failed: %s", err) - } - } - - // Construct all the certificates now - tlsConfig.BuildNameToCertificate() - - // SSL verification - if i.ServerName != "" { - tlsConfig.ServerName = i.ServerName - tlsConfig.InsecureSkipVerify = false - } - if !i.SSLVerify { - log.Printf("[WARN] (clients) disabling vault SSL verification") - tlsConfig.InsecureSkipVerify = true - } - - // Save the TLS config on our transport - transport.TLSClientConfig = &tlsConfig - } - - // Setup the new transport - vaultConfig.HttpClient.Transport = transport - - // Create the client - client, err := vaultapi.NewClient(vaultConfig) - if err != nil { - return fmt.Errorf("client set: vault: %s", err) - } - - // Set the namespace if given. - if i.Namespace != "" { - client.SetNamespace(i.Namespace) - } - - // Set token using k8s auth method. - if i.K8SAuthRoleName != "" && i.Token == "" { - err = prepareK8SServiceTokenAuth(i, client) - if err != nil { - return fmt.Errorf("client set: vault: %w", err) - } - } - - if i.Token != "" { - client.SetToken(i.Token) - } - - // Save the data on ourselves - c.Lock() - c.vault = &vaultClient{ - client: client, - httpClient: vaultConfig.HttpClient, - } - c.Unlock() - - return nil -} - -// CreateNomadClient creates a new Nomad API client from the given input. -func (c *ClientSet) CreateNomadClient(i *CreateNomadClientInput) error { - conf := nomadapi.DefaultConfig() - - if i.Address != "" { - conf.Address = i.Address - } - - if i.Namespace != "" { - conf.Namespace = i.Namespace - } - - if i.Token != "" { - conf.SecretID = i.Token - } - - if i.AuthUsername != "" || i.AuthPassword != "" { - conf.HttpAuth = &nomadapi.HttpBasicAuth{ - Username: i.AuthUsername, - Password: i.AuthPassword, - } - } - - // This transport will attempt to keep connections open to the Nomad server. 
- var dialer TransportDialer - dialer = &net.Dialer{ - Timeout: i.TransportDialTimeout, - KeepAlive: i.TransportDialKeepAlive, - } - - if i.TransportCustomDialer != nil { - dialer = i.TransportCustomDialer - } - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: dialer.Dial, - DisableKeepAlives: i.TransportDisableKeepAlives, - MaxIdleConns: i.TransportMaxIdleConns, - IdleConnTimeout: i.TransportIdleConnTimeout, - MaxIdleConnsPerHost: i.TransportMaxIdleConnsPerHost, - TLSHandshakeTimeout: i.TransportTLSHandshakeTimeout, - } - - // Configure SSL - if i.SSLEnabled { - var tlsConfig tls.Config - - // Custom certificate or certificate and key - if i.SSLCert != "" && i.SSLKey != "" { - cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLKey) - if err != nil { - return fmt.Errorf("client set: nomad: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } else if i.SSLCert != "" { - cert, err := tls.LoadX509KeyPair(i.SSLCert, i.SSLCert) - if err != nil { - return fmt.Errorf("client set: nomad: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - // Custom CA certificate - if i.SSLCACert != "" || i.SSLCAPath != "" { - rootConfig := &rootcerts.Config{ - CAFile: i.SSLCACert, - CAPath: i.SSLCAPath, - } - if err := rootcerts.ConfigureTLS(&tlsConfig, rootConfig); err != nil { - return fmt.Errorf("client set: nomad configuring TLS failed: %s", err) - } - } - - // Construct all the certificates now - tlsConfig.BuildNameToCertificate() - - // SSL verification - if i.ServerName != "" { - tlsConfig.ServerName = i.ServerName - tlsConfig.InsecureSkipVerify = false - } - if !i.SSLVerify { - log.Printf("[WARN] (clients) disabling nomad SSL verification") - tlsConfig.InsecureSkipVerify = true - } - - // Save the TLS config on our transport - transport.TLSClientConfig = &tlsConfig - } - - conf.HttpClient = &http.Client{ - Transport: transport, - } - - // Create the API client - client, err := nomadapi.NewClient(conf) - if err != nil { - return fmt.Errorf("client set: nomad: %w", err) - } - - // Save the data on ourselves - c.Lock() - c.nomad = &nomadClient{ - client: client, - httpClient: conf.HttpClient, - } - c.Unlock() - - return nil -} - -// Consul returns the Consul client for this set. -func (c *ClientSet) Consul() *consulapi.Client { - c.RLock() - defer c.RUnlock() - return c.consul.client -} - -// Vault returns the Vault client for this set. -func (c *ClientSet) Vault() *vaultapi.Client { - c.RLock() - defer c.RUnlock() - return c.vault.client -} - -// Nomad returns the Nomad client for this set. -func (c *ClientSet) Nomad() *nomadapi.Client { - c.RLock() - defer c.RUnlock() - return c.nomad.client -} - -// Stop closes all idle connections for any attached clients. 
-func (c *ClientSet) Stop() { - c.Lock() - defer c.Unlock() - - if c.consul != nil { - c.consul.transport.CloseIdleConnections() - } - - if c.vault != nil { - c.vault.httpClient.Transport.(*http.Transport).CloseIdleConnections() - } - - if c.nomad != nil { - c.nomad.httpClient.Transport.(*http.Transport).CloseIdleConnections() - } -} - -func prepareK8SServiceTokenAuth( - i *CreateVaultClientInput, - client *vaultapi.Client, -) (err error) { - opts := make([]vaultkubernetesauth.LoginOption, 0, 2) - - switch { - case i.K8SServiceAccountToken != "": - opts = append(opts, vaultkubernetesauth.WithServiceAccountToken( - i.K8SServiceAccountToken, - )) - case i.K8SServiceAccountTokenPath != "": - opts = append(opts, vaultkubernetesauth.WithServiceAccountTokenPath( - i.K8SServiceAccountTokenPath, - )) - default: - // The Kubernetes service account token JWT will be retrieved - // from /run/secrets/kubernetes.io/serviceaccount/token. - } - - if i.K8SServiceMountPath != "" { - opts = append(opts, vaultkubernetesauth.WithMountPath( - i.K8SServiceMountPath, - )) - } - - k8sAuth, err := vaultkubernetesauth.NewKubernetesAuth(i.K8SAuthRoleName, opts...) - if err != nil { - return fmt.Errorf("k8s auth: new kubernetes auth: %w", err) - } - - ctx := context.TODO() - sec, err := client.Auth().Login(ctx, k8sAuth) - if err != nil { - return fmt.Errorf("k8s auth: login: %w", err) - } - - i.Token = sec.Auth.ClientToken - - return nil -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/connect_ca.go b/vendor/github.com/hashicorp/consul-template/dependency/connect_ca.go deleted file mode 100644 index 94183674..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/connect_ca.go +++ /dev/null @@ -1,69 +0,0 @@ -package dependency - -import ( - "log" - "net/url" - - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*ConnectCAQuery)(nil) - -type ConnectCAQuery struct { - stopCh chan struct{} -} - -func NewConnectCAQuery() *ConnectCAQuery { - return &ConnectCAQuery{ - stopCh: make(chan struct{}, 1), - } -} - -func (d *ConnectCAQuery) Fetch(clients *ClientSet, opts *QueryOptions) ( - interface{}, *ResponseMetadata, error, -) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(nil) - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/agent/connect/ca/roots", - RawQuery: opts.String(), - }) - - certs, md, err := clients.Consul().Agent().ConnectCARoots( - opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d results", d, len(certs.Roots)) - log.Printf("[TRACE] %s: %#v ", d, md) - - rm := &ResponseMetadata{ - LastIndex: md.LastIndex, - LastContact: md.LastContact, - } - - return certs.Roots, rm, nil -} - -func (d *ConnectCAQuery) Stop() { - close(d.stopCh) -} - -func (d *ConnectCAQuery) CanShare() bool { - return false -} - -func (d *ConnectCAQuery) Type() Type { - return TypeConsul -} - -func (d *ConnectCAQuery) String() string { - return "connect.caroots" -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/connect_leaf.go b/vendor/github.com/hashicorp/consul-template/dependency/connect_leaf.go deleted file mode 100644 index 4433ed64..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/connect_leaf.go +++ /dev/null @@ -1,74 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "net/url" - - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*ConnectLeafQuery)(nil) - -type 
ConnectLeafQuery struct { - stopCh chan struct{} - - service string -} - -func NewConnectLeafQuery(service string) *ConnectLeafQuery { - return &ConnectLeafQuery{ - stopCh: make(chan struct{}, 1), - service: service, - } -} - -func (d *ConnectLeafQuery) Fetch(clients *ClientSet, opts *QueryOptions) ( - interface{}, *ResponseMetadata, error, -) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - opts = opts.Merge(nil) - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/agent/connect/ca/leaf/" + d.service, - RawQuery: opts.String(), - }) - - cert, md, err := clients.Consul().Agent().ConnectCALeaf(d.service, - opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned response", d) - - rm := &ResponseMetadata{ - LastIndex: md.LastIndex, - LastContact: md.LastContact, - } - - return cert, rm, nil -} - -func (d *ConnectLeafQuery) Stop() { - close(d.stopCh) -} - -func (d *ConnectLeafQuery) CanShare() bool { - return false -} - -func (d *ConnectLeafQuery) Type() Type { - return TypeConsul -} - -func (d *ConnectLeafQuery) String() string { - if d.service != "" { - return fmt.Sprintf("connect.caleaf(%s)", d.service) - } - return "connect.caleaf" -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go b/vendor/github.com/hashicorp/consul-template/dependency/dependency.go deleted file mode 100644 index c1928554..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/dependency.go +++ /dev/null @@ -1,231 +0,0 @@ -package dependency - -import ( - "net/url" - "regexp" - "sort" - "strconv" - "time" - - consulapi "github.com/hashicorp/consul/api" - nomadapi "github.com/hashicorp/nomad/api" -) - -const ( - dcRe = `(@(?P[[:word:]\.\-\_]+))?` - keyRe = `/?(?P[^@]+)` - filterRe = `(\|(?P[[:word:]\,]+))?` - serviceNameRe = `(?P[[:word:]\-\_]+)` - nodeNameRe = `(?P[[:word:]\.\-\_]+)` - nearRe = `(~(?P[[:word:]\.\-\_]+))?` - prefixRe = `/?(?P[^@]+)` - tagRe = `((?P[[:word:]=:\.\-\_]+)\.)?` - regionRe = `(@(?P[[:word:]\.\-\_]+))?` - nvPathRe = `/?(?P[^@]+)` - nvNamespaceRe = `(@(?P[[:word:]\-\_]+))?` - nvListPrefixRe = `/?(?P[^@]*)` - nvListNSRe = `(@(?P([[:word:]\-\_]+|\*)))?` - nvRegionRe = `(\.(?P[[:word:]\-\_]+))?` -) - -type Type int - -const ( - TypeConsul Type = iota - TypeVault - TypeLocal - TypeNomad -) - -// Dependency is an interface for a dependency that Consul Template is capable -// of watching. -type Dependency interface { - Fetch(*ClientSet, *QueryOptions) (interface{}, *ResponseMetadata, error) - CanShare() bool - String() string - Stop() - Type() Type -} - -// ServiceTags is a slice of tags assigned to a Service -type ServiceTags []string - -// QueryOptions is a list of options to send with the query. These options are -// client-agnostic, and the dependency determines which, if any, of the options -// to use. 
-type QueryOptions struct { - AllowStale bool - Datacenter string - Region string - Near string - Choose string - RequireConsistent bool - VaultGrace time.Duration - WaitIndex uint64 - WaitTime time.Duration -} - -func (q *QueryOptions) Merge(o *QueryOptions) *QueryOptions { - var r QueryOptions - - if q == nil { - if o == nil { - return &QueryOptions{} - } - r = *o - return &r - } - - r = *q - - if o == nil { - return &r - } - - if o.AllowStale != false { - r.AllowStale = o.AllowStale - } - - if o.Datacenter != "" { - r.Datacenter = o.Datacenter - } - - if o.Region != "" { - r.Region = o.Region - } - - if o.Near != "" { - r.Near = o.Near - } - - if o.Choose != "" { - r.Choose = o.Choose - } - - if o.RequireConsistent != false { - r.RequireConsistent = o.RequireConsistent - } - - if o.WaitIndex != 0 { - r.WaitIndex = o.WaitIndex - } - - if o.WaitTime != 0 { - r.WaitTime = o.WaitTime - } - - return &r -} - -func (q *QueryOptions) ToConsulOpts() *consulapi.QueryOptions { - return &consulapi.QueryOptions{ - AllowStale: q.AllowStale, - Datacenter: q.Datacenter, - Near: q.Near, - RequireConsistent: q.RequireConsistent, - WaitIndex: q.WaitIndex, - WaitTime: q.WaitTime, - } -} - -func (q *QueryOptions) ToNomadOpts() *nomadapi.QueryOptions { - var params map[string]string - if q.Choose != "" { - params = map[string]string{ - "choose": q.Choose, - } - } - return &nomadapi.QueryOptions{ - AllowStale: q.AllowStale, - Region: q.Region, - Params: params, - WaitIndex: q.WaitIndex, - WaitTime: q.WaitTime, - } -} - -func (q *QueryOptions) String() string { - u := &url.Values{} - - if q.AllowStale { - u.Add("stale", strconv.FormatBool(q.AllowStale)) - } - - if q.Datacenter != "" { - u.Add("dc", q.Datacenter) - } - - if q.Region != "" { - u.Add("region", q.Region) - } - - if q.Near != "" { - u.Add("near", q.Near) - } - - if q.Choose != "" { - u.Add("choose", q.Choose) - } - - if q.RequireConsistent { - u.Add("consistent", strconv.FormatBool(q.RequireConsistent)) - } - - if q.WaitIndex != 0 { - u.Add("index", strconv.FormatUint(q.WaitIndex, 10)) - } - - if q.WaitTime != 0 { - u.Add("wait", q.WaitTime.String()) - } - - return u.Encode() -} - -// ResponseMetadata is a struct that contains metadata about the response. This -// is returned from a Fetch function call. -type ResponseMetadata struct { - LastIndex uint64 - LastContact time.Duration - BlockOnNil bool // keep blocking on `nil` data returns -} - -// deepCopyAndSortTags deep copies the tags in the given string slice and then -// sorts and returns the copied result. -func deepCopyAndSortTags(tags []string) []string { - newTags := make([]string, 0, len(tags)) - for _, tag := range tags { - newTags = append(newTags, tag) - } - sort.Strings(newTags) - return newTags -} - -// respWithMetadata is a short wrapper to return the given interface with fake -// response metadata for non-Consul dependencies. -func respWithMetadata(i interface{}) (interface{}, *ResponseMetadata, error) { - return i, &ResponseMetadata{ - LastContact: 0, - LastIndex: uint64(time.Now().Unix()), - }, nil -} - -// regexpMatch matches the given regexp and extracts the match groups into a -// named map. 
-func regexpMatch(re *regexp.Regexp, q string) map[string]string { - names := re.SubexpNames() - match := re.FindAllStringSubmatch(q, -1) - - if len(match) == 0 { - return map[string]string{} - } - - m := map[string]string{} - for i, n := range match[0] { - if names[i] != "" { - m[names[i]] = n - } - } - - return m -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/errors.go b/vendor/github.com/hashicorp/consul-template/dependency/errors.go deleted file mode 100644 index dd03ac87..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package dependency - -import "errors" - -// ErrStopped is a special error that is returned when a dependency is -// prematurely stopped, usually due to a configuration reload or a process -// interrupt. -var ErrStopped = errors.New("dependency stopped") - -// ErrContinue is a special error which says to continue (retry) on error. -var ErrContinue = errors.New("dependency continue") - -var ErrLeaseExpired = errors.New("lease expired or is not renewable") diff --git a/vendor/github.com/hashicorp/consul-template/dependency/file.go b/vendor/github.com/hashicorp/consul-template/dependency/file.go deleted file mode 100644 index 3f9fb52e..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/file.go +++ /dev/null @@ -1,129 +0,0 @@ -package dependency - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "strings" - "time" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*FileQuery)(nil) - - // FileQuerySleepTime is the amount of time to sleep between queries, since - // the fsnotify library is not compatible with solaris and other OSes yet. - FileQuerySleepTime = 2 * time.Second -) - -// FileQuery represents a local file dependency. -type FileQuery struct { - stopCh chan struct{} - - path string - stat os.FileInfo -} - -// NewFileQuery creates a file dependency from the given path. -func NewFileQuery(s string) (*FileQuery, error) { - s = strings.TrimSpace(s) - if s == "" { - return nil, fmt.Errorf("file: invalid format: %q", s) - } - - return &FileQuery{ - stopCh: make(chan struct{}, 1), - path: s, - }, nil -} - -// Fetch retrieves this dependency and returns the result or any errors that -// occur in the process. -func (d *FileQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - log.Printf("[TRACE] %s: READ %s", d, d.path) - - select { - case <-d.stopCh: - log.Printf("[TRACE] %s: stopped", d) - return "", nil, ErrStopped - case r := <-d.watch(d.stat): - if r.err != nil { - return "", nil, errors.Wrap(r.err, d.String()) - } - - log.Printf("[TRACE] %s: reported change", d) - - data, err := ioutil.ReadFile(d.path) - if err != nil { - return "", nil, errors.Wrap(err, d.String()) - } - - d.stat = r.stat - return respWithMetadata(string(data)) - } -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *FileQuery) CanShare() bool { - return false -} - -// Stop halts the dependency's fetch function. -func (d *FileQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *FileQuery) String() string { - return fmt.Sprintf("file(%s)", d.path) -} - -// Type returns the type of this dependency. 
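// Illustrative usage sketch (hypothetical helper, same-package assumption,
// path invented): the first Fetch returns the file's current contents;
// subsequent calls block, polling at FileQuerySleepTime, until the file's
// size or mtime changes.
func demoFileQuery() (string, error) {
	d, err := NewFileQuery("/etc/myapp/config.json")
	if err != nil {
		return "", err
	}
	defer d.Stop()
	data, _, err := d.Fetch(nil, nil) // clients and opts are unused for local files
	if err != nil {
		return "", err
	}
	return data.(string), nil
}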
-func (d *FileQuery) Type() Type { - return TypeLocal -} - -type watchResult struct { - stat os.FileInfo - err error -} - -// watch watchers the file for changes -func (d *FileQuery) watch(lastStat os.FileInfo) <-chan *watchResult { - ch := make(chan *watchResult, 1) - - go func(lastStat os.FileInfo) { - for { - stat, err := os.Stat(d.path) - if err != nil { - select { - case <-d.stopCh: - return - case ch <- &watchResult{err: err}: - return - } - } - - changed := lastStat == nil || - lastStat.Size() != stat.Size() || - lastStat.ModTime() != stat.ModTime() - - if changed { - select { - case <-d.stopCh: - return - case ch <- &watchResult{stat: stat}: - return - } - } - - time.Sleep(FileQuerySleepTime) - } - }(lastStat) - - return ch -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go deleted file mode 100644 index 6a683231..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go +++ /dev/null @@ -1,274 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "sort" - "strings" - - "github.com/hashicorp/consul/api" - "github.com/pkg/errors" -) - -const ( - HealthAny = "any" - HealthPassing = "passing" - HealthWarning = "warning" - HealthCritical = "critical" - HealthMaint = "maintenance" - - NodeMaint = "_node_maintenance" - ServiceMaint = "_service_maintenance:" -) - -var ( - // Ensure implements - _ Dependency = (*HealthServiceQuery)(nil) - - // HealthServiceQueryRe is the regular expression to use. - HealthServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + dcRe + nearRe + filterRe + `\z`) -) - -func init() { - gob.Register([]*HealthService{}) -} - -// HealthService is a service entry in Consul. -type HealthService struct { - Node string - NodeID string - NodeAddress string - NodeTaggedAddresses map[string]string - NodeMeta map[string]string - ServiceMeta map[string]string - Address string - ServiceTaggedAddresses map[string]api.ServiceAddress - ID string - Name string - Tags ServiceTags - Checks api.HealthChecks - Status string - Port int - Weights api.AgentWeights -} - -// HealthServiceQuery is the representation of all a service query in Consul. -type HealthServiceQuery struct { - stopCh chan struct{} - - dc string - filters []string - name string - near string - tag string - connect bool -} - -// NewHealthServiceQuery processes the strings to build a service dependency. -func NewHealthServiceQuery(s string) (*HealthServiceQuery, error) { - return healthServiceQuery(s, false) -} - -// NewHealthConnect Query processes the strings to build a connect dependency. 
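// Illustrative sketch (hypothetical helper): the accepted grammar is
// "tag.name@dc~near|filters"; only the service name is required and the
// filter defaults to "passing".
func demoHealthServiceQuery() {
	q, err := NewHealthServiceQuery("primary.web@dc1|passing,warning")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.String())
	// Prints: health.service(primary.web@dc1|passing,warning)
}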
-func NewHealthConnectQuery(s string) (*HealthServiceQuery, error) { - return healthServiceQuery(s, true) -} - -func healthServiceQuery(s string, connect bool) (*HealthServiceQuery, error) { - if !HealthServiceQueryRe.MatchString(s) { - return nil, fmt.Errorf("health.service: invalid format: %q", s) - } - - m := regexpMatch(HealthServiceQueryRe, s) - - var filters []string - if filter := m["filter"]; filter != "" { - split := strings.Split(filter, ",") - for _, f := range split { - f = strings.TrimSpace(f) - switch f { - case HealthAny, - HealthPassing, - HealthWarning, - HealthCritical, - HealthMaint: - filters = append(filters, f) - case "": - default: - return nil, fmt.Errorf( - "health.service: invalid filter: %q in %q", f, s) - } - } - sort.Strings(filters) - } else { - filters = []string{HealthPassing} - } - - return &HealthServiceQuery{ - stopCh: make(chan struct{}, 1), - dc: m["dc"], - filters: filters, - name: m["name"], - near: m["near"], - tag: m["tag"], - connect: connect, - }, nil -} - -// Fetch queries the Consul API defined by the given client and returns a slice -// of HealthService objects. -func (d *HealthServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - Near: d.near, - }) - - u := &url.URL{ - Path: "/v1/health/service/" + d.name, - RawQuery: opts.String(), - } - if d.tag != "" { - q := u.Query() - q.Set("tag", d.tag) - u.RawQuery = q.Encode() - } - log.Printf("[TRACE] %s: GET %s", d, u) - - // Check if a user-supplied filter was given. If so, we may be querying for - // more than healthy services, so we need to implement client-side - // filtering. - passingOnly := len(d.filters) == 1 && d.filters[0] == HealthPassing - - nodes := clients.Consul().Health().Service - if d.connect { - nodes = clients.Consul().Health().Connect - } - entries, qm, err := nodes(d.name, d.tag, passingOnly, opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d results", d, len(entries)) - - list := make([]*HealthService, 0, len(entries)) - for _, entry := range entries { - // Get the status of this service from its checks. - status := entry.Checks.AggregatedStatus() - - // If we are not checking only healthy services, filter out services - // that do not match the given filter. - if !acceptStatus(d.filters, status) { - continue - } - - // Get the address of the service, falling back to the address of the - // node. 
- address := entry.Service.Address - if address == "" { - address = entry.Node.Address - } - - list = append(list, &HealthService{ - Node: entry.Node.Node, - NodeID: entry.Node.ID, - NodeAddress: entry.Node.Address, - NodeTaggedAddresses: entry.Node.TaggedAddresses, - NodeMeta: entry.Node.Meta, - ServiceMeta: entry.Service.Meta, - Address: address, - ServiceTaggedAddresses: entry.Service.TaggedAddresses, - ID: entry.Service.ID, - Name: entry.Service.Service, - Tags: ServiceTags( - deepCopyAndSortTags(entry.Service.Tags)), - Status: status, - Checks: entry.Checks, - Port: entry.Service.Port, - Weights: entry.Service.Weights, - }) - } - - log.Printf("[TRACE] %s: returned %d results after filtering", d, len(list)) - - // Sort unless the user explicitly asked for nearness - if d.near == "" { - sort.Stable(ByNodeThenID(list)) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return list, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *HealthServiceQuery) CanShare() bool { - return true -} - -// Stop halts the dependency's fetch function. -func (d *HealthServiceQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *HealthServiceQuery) String() string { - name := d.name - if d.tag != "" { - name = d.tag + "." + name - } - if d.dc != "" { - name = name + "@" + d.dc - } - if d.near != "" { - name = name + "~" + d.near - } - if len(d.filters) > 0 { - name = name + "|" + strings.Join(d.filters, ",") - } - if d.connect { - return fmt.Sprintf("health.connect(%s)", name) - } - return fmt.Sprintf("health.service(%s)", name) -} - -// Type returns the type of this dependency. -func (d *HealthServiceQuery) Type() Type { - return TypeConsul -} - -// acceptStatus allows us to check if a slice of health checks pass this filter. -func acceptStatus(list []string, s string) bool { - for _, status := range list { - if status == s || status == HealthAny { - return true - } - } - return false -} - -// ByNodeThenID is a sortable slice of Service -type ByNodeThenID []*HealthService - -// Len, Swap, and Less are used to implement the sort.Sort interface. -func (s ByNodeThenID) Len() int { return len(s) } -func (s ByNodeThenID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s ByNodeThenID) Less(i, j int) bool { - if s[i].Node < s[j].Node { - return true - } else if s[i].Node == s[j].Node { - return s[i].ID < s[j].ID - } - return false -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go deleted file mode 100644 index eb3954e6..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/kv_get.go +++ /dev/null @@ -1,115 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "net/url" - "regexp" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*KVGetQuery)(nil) - - // KVGetQueryRe is the regular expression to use. - KVGetQueryRe = regexp.MustCompile(`\A` + keyRe + dcRe + `\z`) -) - -// KVGetQuery queries the KV store for a single key. -type KVGetQuery struct { - stopCh chan struct{} - - dc string - key string - blockOnNil bool -} - -// NewKVGetQuery parses a string into a dependency. 
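// Illustrative sketch (hypothetical helper): kv.get dependencies are
// addressed as "key@datacenter"; EnableBlocking makes a missing key keep
// blocking instead of rendering an empty value.
func demoKVGetQuery() {
	q, err := NewKVGetQuery("config/app/timeout@dc1")
	if err != nil {
		panic(err)
	}
	q.EnableBlocking()
	fmt.Println(q.String())
	// Prints: kv.block(config/app/timeout@dc1)
}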
-func NewKVGetQuery(s string) (*KVGetQuery, error) { - if s != "" && !KVGetQueryRe.MatchString(s) { - return nil, fmt.Errorf("kv.get: invalid format: %q", s) - } - - m := regexpMatch(KVGetQueryRe, s) - return &KVGetQuery{ - stopCh: make(chan struct{}, 1), - dc: m["dc"], - key: m["key"], - }, nil -} - -// Fetch queries the Consul API defined by the given client. -func (d *KVGetQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - }) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/kv/" + d.key, - RawQuery: opts.String(), - }) - - // NOTE that the Consul HTTP KV API returns a 404 on failed gets, but the - // Consul Go API Package ignores those for KV Gets, returning nil data and - // an error. - pair, qm, err := clients.Consul().KV().Get(d.key, opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - BlockOnNil: d.blockOnNil, - } - - if pair == nil { - log.Printf("[TRACE] %s: returned nil", d) - return nil, rm, nil - } - - value := string(pair.Value) - log.Printf("[TRACE] %s: returned %q", d, value) - return value, rm, nil -} - -// EnableBlocking turns this into a blocking KV query. -func (d *KVGetQuery) EnableBlocking() { - d.blockOnNil = true -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *KVGetQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *KVGetQuery) String() string { - key := d.key - if d.dc != "" { - key = key + "@" + d.dc - } - - if d.blockOnNil { - return fmt.Sprintf("kv.block(%s)", key) - } - return fmt.Sprintf("kv.get(%s)", key) -} - -// Stop halts the dependency's fetch function. -func (d *KVGetQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *KVGetQuery) Type() Type { - return TypeConsul -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go deleted file mode 100644 index 60e1ef7e..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/kv_keys.go +++ /dev/null @@ -1,104 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "net/url" - "regexp" - "strings" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*KVKeysQuery)(nil) - - // KVKeysQueryRe is the regular expression to use. - KVKeysQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) -) - -// KVKeysQuery queries the KV store for a single key. -type KVKeysQuery struct { - stopCh chan struct{} - - dc string - prefix string -} - -// NewKVKeysQuery parses a string into a dependency. -func NewKVKeysQuery(s string) (*KVKeysQuery, error) { - if s != "" && !KVKeysQueryRe.MatchString(s) { - return nil, fmt.Errorf("kv.keys: invalid format: %q", s) - } - - m := regexpMatch(KVKeysQueryRe, s) - return &KVKeysQuery{ - stopCh: make(chan struct{}, 1), - dc: m["dc"], - prefix: m["prefix"], - }, nil -} - -// Fetch queries the Consul API defined by the given client. 
-func (d *KVKeysQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - }) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/kv/" + d.prefix, - RawQuery: opts.String(), - }) - - list, qm, err := clients.Consul().KV().Keys(d.prefix, "", opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - keys := make([]string, len(list)) - for i, v := range list { - v = strings.TrimPrefix(v, d.prefix) - v = strings.TrimLeft(v, "/") - keys[i] = v - } - - log.Printf("[TRACE] %s: returned %d results", d, len(list)) - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return keys, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *KVKeysQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *KVKeysQuery) String() string { - prefix := d.prefix - if d.dc != "" { - prefix = prefix + "@" + d.dc - } - return fmt.Sprintf("kv.keys(%s)", prefix) -} - -// Stop halts the dependency's fetch function. -func (d *KVKeysQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *KVKeysQuery) Type() Type { - return TypeConsul -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go b/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go deleted file mode 100644 index 929dfa42..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/kv_list.go +++ /dev/null @@ -1,133 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "strings" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*KVListQuery)(nil) - - // KVListQueryRe is the regular expression to use. - KVListQueryRe = regexp.MustCompile(`\A` + prefixRe + dcRe + `\z`) -) - -func init() { - gob.Register([]*KeyPair{}) -} - -// KeyPair is a simple Key-Value pair -type KeyPair struct { - Path string - Key string - Value string - - // Lesser-used, but still valuable keys from api.KV - CreateIndex uint64 - ModifyIndex uint64 - LockIndex uint64 - Flags uint64 - Session string -} - -// KVListQuery queries the KV store for a single key. -type KVListQuery struct { - stopCh chan struct{} - - dc string - prefix string -} - -// NewKVListQuery parses a string into a dependency. -func NewKVListQuery(s string) (*KVListQuery, error) { - if s != "" && !KVListQueryRe.MatchString(s) { - return nil, fmt.Errorf("kv.list: invalid format: %q", s) - } - - m := regexpMatch(KVListQueryRe, s) - return &KVListQuery{ - stopCh: make(chan struct{}, 1), - dc: m["dc"], - prefix: m["prefix"], - }, nil -} - -// Fetch queries the Consul API defined by the given client. 
-func (d *KVListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Datacenter: d.dc, - }) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/kv/" + d.prefix, - RawQuery: opts.String(), - }) - - list, qm, err := clients.Consul().KV().List(d.prefix, opts.ToConsulOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d pairs", d, len(list)) - - pairs := make([]*KeyPair, 0, len(list)) - for _, pair := range list { - key := strings.TrimPrefix(pair.Key, d.prefix) - key = strings.TrimLeft(key, "/") - - pairs = append(pairs, &KeyPair{ - Path: pair.Key, - Key: key, - Value: string(pair.Value), - CreateIndex: pair.CreateIndex, - ModifyIndex: pair.ModifyIndex, - LockIndex: pair.LockIndex, - Flags: pair.Flags, - Session: pair.Session, - }) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return pairs, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *KVListQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *KVListQuery) String() string { - prefix := d.prefix - if d.dc != "" { - prefix = prefix + "@" + d.dc - } - return fmt.Sprintf("kv.list(%s)", prefix) -} - -// Stop halts the dependency's fetch function. -func (d *KVListQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *KVListQuery) Type() Type { - return TypeConsul -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/nomad_service.go b/vendor/github.com/hashicorp/consul-template/dependency/nomad_service.go deleted file mode 100644 index e3e50f2c..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/nomad_service.go +++ /dev/null @@ -1,182 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "sort" - - "github.com/pkg/errors" -) - -var ( - // Ensure NomadServiceQuery meets the Dependency interface. - _ Dependency = (*NomadServiceQuery)(nil) - - // NomadServiceQueryRe is the regex that is used to understand a service - // specific Nomad query. - // - // e.g. ".@" - NomadServiceQueryRe = regexp.MustCompile(`\A` + tagRe + serviceNameRe + regionRe + `\z`) -) - -func init() { - gob.Register([]*NomadService{}) -} - -// NomadService is a fully hydrated service registration response from the -// mirroring the Nomad API response object. -type NomadService struct { - ID string - Name string - Node string - Address string - Port int - Datacenter string - Tags ServiceTags - JobID string - AllocID string -} - -// NomadServiceQuery is the representation of a requested Nomad services -// dependency from inside a template. -type NomadServiceQuery struct { - stopCh chan struct{} - - region string - name string - tag string - choose string -} - -// NewNomadServiceQuery parses a string into a NomadServiceQuery which is -// used to list services registered within Nomad which match a particular name. 
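// Illustrative sketch (hypothetical helper): nomad.service queries accept
// "tag.name@region"; the choose variant encodes consistent sampling as
// "count|key", which ToNomadOpts forwards as the "choose" query parameter.
func demoNomadServiceChoose() {
	q, err := NewNomadServiceChooseQuery(3, "redis-cache", "cache@us-east-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.String())
	// Prints: nomad.service(cache@us-east-1:3|redis-cache)
}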
-func NewNomadServiceQuery(s string) (*NomadServiceQuery, error) { - if !NomadServiceQueryRe.MatchString(s) { - return nil, fmt.Errorf("nomad.service: invalid format: %q", s) - } - - m := regexpMatch(NomadServiceQueryRe, s) - - return &NomadServiceQuery{ - stopCh: make(chan struct{}, 1), - region: m["region"], - name: m["name"], - tag: m["tag"], - }, nil -} - -// NewNomadServiceChooseQuery parses s using NewNomadServiceQuery, and then also -// configures the resulting query with the choose parameter set according to the -// count and key arguments. -func NewNomadServiceChooseQuery(count int, key, s string) (*NomadServiceQuery, error) { - query, err := NewNomadServiceQuery(s) - if err != nil { - return nil, err - } - - choose := fmt.Sprintf("%d|%s", count, key) - query.choose = choose - - return query, nil -} - -// Fetch queries the Nomad API defined by the given client and returns a slice -// of NomadService objects. -func (d *NomadServiceQuery) Fetch(client *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Region: d.region, - Choose: d.choose, - }) - - u := &url.URL{ - Path: "/v1/service/" + d.name, - RawQuery: opts.String(), - } - - log.Printf("[TRACE] %s: GET %s", d, u) - - entries, qm, err := client.Nomad().Services().Get(d.name, opts.ToNomadOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d results", d, len(entries)) - - services := make([]*NomadService, 0, len(entries)) - for _, s := range entries { - // Filter by tag - if d.tag != "" { - found := false - for i := 0; !found && i < len(s.Tags); i++ { - found = s.Tags[i] == d.tag - } - if !found { - continue - } - } - - services = append(services, &NomadService{ - ID: s.ID, - Name: s.ServiceName, - Node: s.NodeID, - Address: s.Address, - Port: s.Port, - Datacenter: s.Datacenter, - Tags: deepCopyAndSortTags(s.Tags), - JobID: s.JobID, - AllocID: s.AllocID, - }) - } - - sort.Stable(NomadServiceByName(services)) - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - } - - return services, rm, nil -} - -func (d *NomadServiceQuery) CanShare() bool { - return true -} - -func (d *NomadServiceQuery) String() string { - name := d.name - if d.tag != "" { - name = d.tag + "." + name - } - if d.region != "" { - name = name + "@" + d.region - } - if d.choose != "" { - name = name + ":" + d.choose - } - return fmt.Sprintf("nomad.service(%s)", name) -} - -// Stop halts the dependency's fetch function. -func (d *NomadServiceQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *NomadServiceQuery) Type() Type { - return TypeNomad -} - -// NomadServiceByName is a sortable slice of NomadService structs. 
-type NomadServiceByName []*NomadService - -func (s NomadServiceByName) Len() int { return len(s) } -func (s NomadServiceByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s NomadServiceByName) Less(i, j int) bool { return s[i].Name < s[j].Name } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/nomad_services.go b/vendor/github.com/hashicorp/consul-template/dependency/nomad_services.go deleted file mode 100644 index 6838726d..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/nomad_services.go +++ /dev/null @@ -1,134 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "sort" - - nomadapi "github.com/hashicorp/nomad/api" - "github.com/pkg/errors" -) - -var ( - // Ensure NomadServiceQuery meets the Dependency interface. - _ Dependency = (*NomadServicesQuery)(nil) - - // NomadServicesQueryRe is the regex that is used to understand a service - // listing Nomad query. - NomadServicesQueryRe = regexp.MustCompile(`\A` + regionRe + `\z`) -) - -func init() { - gob.Register([]*NomadServicesSnippet{}) -} - -// NomadServicesSnippet is a stub service entry in Nomad. -type NomadServicesSnippet struct { - Name string - Tags ServiceTags -} - -// nomadSortableSnippet is a sortable slice of NomadServicesSnippet structs. -type nomadSortableSnippet []*NomadServicesSnippet - -func (s nomadSortableSnippet) Len() int { return len(s) } -func (s nomadSortableSnippet) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nomadSortableSnippet) Less(i, j int) bool { return s[i].Name < s[j].Name } - -// NomadServicesQuery is the representation of a requested Nomad service -// dependency from inside a template. -type NomadServicesQuery struct { - stopCh chan struct{} - - region string -} - -// NewNomadServicesQuery parses a string into a NomadServicesQuery which is -// used to list services registered within Nomad. -func NewNomadServicesQuery(s string) (*NomadServicesQuery, error) { - if !NomadServicesQueryRe.MatchString(s) { - return nil, fmt.Errorf("nomad.services: invalid format: %q", s) - } - - m := regexpMatch(NomadServicesQueryRe, s) - return &NomadServicesQuery{ - stopCh: make(chan struct{}, 1), - region: m["region"], - }, nil -} - -// CanShare returns true since Nomad service dependencies are shareable. -func (*NomadServicesQuery) CanShare() bool { - return true -} - -// Fetch queries the Nomad API defined by the given client and returns a slice -// of NomadServiceSnippet objects. -func (d *NomadServicesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{ - Region: d.region, - }) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/services", - RawQuery: opts.String(), - }) - - namespaces, qm, err := clients.Nomad().Services().List(opts.ToNomadOpts()) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - // Cross namespaces queries aren't allowed via consul-template, so only - // the namespace the client is configured for will be returned. 
- var entries []*nomadapi.ServiceRegistrationStub - if len(namespaces) > 0 { - entries = namespaces[0].Services - } - - log.Printf("[TRACE] %s: returned %d results", d, len(entries)) - - services := make([]*NomadServicesSnippet, len(entries)) - for i, s := range entries { - services[i] = &NomadServicesSnippet{ - Name: s.ServiceName, - Tags: deepCopyAndSortTags(s.Tags), - } - } - - sort.Stable(nomadSortableSnippet(services)) - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return services, rm, nil -} - -// String returns the human-friendly version of this dependency. -func (d *NomadServicesQuery) String() string { - if d.region != "" { - return fmt.Sprintf("nomad.services(@%s)", d.region) - } - return "nomad.services" -} - -// Stop halts the dependency's fetch function. -func (d *NomadServicesQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *NomadServicesQuery) Type() Type { - return TypeNomad -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_get.go b/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_get.go deleted file mode 100644 index cb0564cf..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_get.go +++ /dev/null @@ -1,133 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "net/url" - "regexp" - "strings" - - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*NVGetQuery)(nil) - - // NVGetQueryRe is the regular expression to use. - NVGetQueryRe = regexp.MustCompile(`\A` + nvPathRe + nvNamespaceRe + nvRegionRe + `\z`) -) - -// NVGetQuery queries the KV store for a single key. -type NVGetQuery struct { - stopCh chan struct{} - - path string - namespace string - region string - - blockOnNil bool -} - -// NewNVGetQuery parses a string into a dependency. -func NewNVGetQuery(ns, s string) (*NVGetQuery, error) { - s = strings.TrimSpace(s) - s = strings.Trim(s, "/") - - if s != "" && !NVGetQueryRe.MatchString(s) { - return nil, fmt.Errorf("nomad.var.get: invalid format: %q", s) - } - - m := regexpMatch(NVGetQueryRe, s) - out := &NVGetQuery{ - stopCh: make(chan struct{}, 1), - path: m["path"], - namespace: m["namespace"], - region: m["region"], - } - if out.namespace == "" && ns != "" { - out.namespace = ns - } - return out, nil -} - -// Fetch queries the Nomad API defined by the given client. -func (d *NVGetQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{}) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/var/" + d.path, - RawQuery: opts.String(), - }) - - nOpts := opts.ToNomadOpts() - nOpts.Namespace = d.namespace - nOpts.Region = d.region - // NOTE: The Peek method of the Nomad Variables API will check a value, - // return it if it exists, but return a nil value and NO error if it is - // not found. - nVar, qm, err := clients.Nomad().Variables().Peek(d.path, nOpts) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - BlockOnNil: d.blockOnNil, - } - - if nVar == nil { - log.Printf("[TRACE] %s: returned nil", d) - return nil, rm, nil - } - - items := &NewNomadVariable(nVar).Items - log.Printf("[TRACE] %s: returned %q", d, nVar.Path) - return items, rm, nil -} - -// EnableBlocking turns this into a blocking KV query. 
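// Illustrative sketch (hypothetical helper): Nomad variables are addressed as
// "path@namespace.region"; an empty namespace falls back to the template's
// namespace argument, and String() substitutes "default"/"global" for display
// only.
func demoNVGetQuery() {
	q, err := NewNVGetQuery("", "nomad/jobs/redis@prod.eu-west-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.String())
	// Prints: nomad.var.get(nomad/jobs/redis@prod.eu-west-1)
}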
-func (d *NVGetQuery) EnableBlocking() { - d.blockOnNil = true -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *NVGetQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -// This value is also used to disambiguate multiple instances in the Brain -func (d *NVGetQuery) String() string { - ns := d.namespace - if ns == "" { - ns = "default" - } - region := d.region - if region == "" { - region = "global" - } - path := d.path - key := fmt.Sprintf("%s@%s.%s", path, ns, region) - if d.blockOnNil { - return fmt.Sprintf("nomad.var.block(%s)", key) - } - return fmt.Sprintf("nomad.var.get(%s)", key) -} - -// Stop halts the dependency's fetch function. -func (d *NVGetQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *NVGetQuery) Type() Type { - return TypeNomad -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_list.go b/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_list.go deleted file mode 100644 index a71d45c6..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_list.go +++ /dev/null @@ -1,136 +0,0 @@ -package dependency - -import ( - "encoding/gob" - "fmt" - "log" - "net/url" - "regexp" - "strings" - - "github.com/hashicorp/nomad/api" - "github.com/pkg/errors" -) - -var ( - // Ensure implements - _ Dependency = (*NVListQuery)(nil) - - // NVListQueryRe is the regular expression to use. - NVListQueryRe = regexp.MustCompile(`\A` + nvListPrefixRe + nvListNSRe + nvRegionRe + `\z`) -) - -func init() { - gob.Register([]*NomadVarMeta{}) -} - -// NVListQuery queries the SV store for the metadata for keys matching the given -// prefix. -type NVListQuery struct { - stopCh chan struct{} - namespace string - region string - prefix string -} - -// NewNVListQuery parses a string into a dependency. -func NewNVListQuery(ns, s string) (*NVListQuery, error) { - if s != "" && !NVListQueryRe.MatchString(s) { - return nil, fmt.Errorf("nomad.var.list: invalid format: %q", s) - } - - m := regexpMatch(NVListQueryRe, s) - out := &NVListQuery{ - stopCh: make(chan struct{}, 1), - namespace: m["namespace"], - region: m["region"], - prefix: m["prefix"], - } - - // Handle paths that are only slashes and discard them - if strings.Trim(out.prefix, "/") == "" { - out.prefix = "" - } - - if out.namespace == "" && ns != "" { - out.namespace = ns - } - - return out, nil -} - -// Fetch queries the Nomad API defined by the given client. -func (d *NVListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{}) - - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/vars/", - RawQuery: opts.String(), - }) - - nOpts := opts.ToNomadOpts() - nOpts.Namespace = d.namespace - nOpts.Region = d.region - list, qm, err := clients.Nomad().Variables().PrefixList(d.prefix, nOpts) - if err != nil && !strings.Contains(err.Error(), "Permission denied") { - return nil, nil, errors.Wrap(err, d.String()) - } - - log.Printf("[TRACE] %s: returned %d paths", d, len(list)) - - vars := make([]*NomadVarMeta, 0, len(list)) - for _, nVar := range list { - vars = append(vars, NewNomadVarMeta(nVar)) - } - - // 404's don't return QueryMeta. 
- if qm == nil { - qm = &api.QueryMeta{ - LastIndex: 1, - } - } - - rm := &ResponseMetadata{ - LastIndex: qm.LastIndex, - LastContact: qm.LastContact, - } - - return vars, rm, nil -} - -// CanShare returns a boolean if this dependency is shareable. -func (d *NVListQuery) CanShare() bool { - return true -} - -// String returns the human-friendly version of this dependency. -func (d *NVListQuery) String() string { - ns := d.namespace - if ns == "" { - ns = "default" - } - region := d.region - if region == "" { - region = "global" - } - prefix := d.prefix - key := fmt.Sprintf("%s@%s.%s", prefix, ns, region) - - return fmt.Sprintf("nomad.var.list(%s)", key) -} - -// Stop halts the dependency's fetch function. -func (d *NVListQuery) Stop() { - close(d.stopCh) -} - -// Type returns the type of this dependency. -func (d *NVListQuery) Type() Type { - return TypeNomad -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_structs.go b/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_structs.go deleted file mode 100644 index a1f36318..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/nomad_var_structs.go +++ /dev/null @@ -1,153 +0,0 @@ -package dependency - -import ( - "fmt" - "sort" - "time" - - "github.com/hashicorp/nomad/api" -) - -// NewNomadVarMeta is used to create a NomadVarMeta from a Nomad API -// VariableMetadata response. -func NewNomadVarMeta(in *api.VariableMetadata) *NomadVarMeta { - return &NomadVarMeta{ - Namespace: in.Namespace, - Path: in.Path, - CreateIndex: in.CreateIndex, - ModifyIndex: in.ModifyIndex, - CreateTime: nanoTime(in.CreateTime), - ModifyTime: nanoTime(in.ModifyTime), - } -} - -// NewNomadVariable is used to create a NomadVariable from a Nomad -// API Variable response. -func NewNomadVariable(in *api.Variable) *NomadVariable { - out := NomadVariable{ - Namespace: in.Namespace, - Path: in.Path, - CreateIndex: in.CreateIndex, - ModifyIndex: in.ModifyIndex, - CreateTime: nanoTime(in.CreateTime), - ModifyTime: nanoTime(in.ModifyTime), - Items: map[string]NomadVarItem{}, - nVar: in, - } - - items := make(NomadVarItems, len(in.Items)) - for k, v := range in.Items { - items[k] = NomadVarItem{k, v, &out} - } - out.Items = items - return &out -} - -// NomadVariable is a template friendly container struct that allows for -// the NomadVar funcs to start inside of Items and have a rational way back up -// to the Variable that is JSON structurally equivalent to the API response. -// This struct's zero value is not trivially usable and should be created with -// NewNomadVariable--especially when outside of the dependency package as -// there is no access to nVar. -type NomadVariable struct { - Namespace, Path string - CreateIndex, ModifyIndex uint64 - CreateTime, ModifyTime nanoTime - Items NomadVarItems - nVar *api.Variable -} - -func (cv NomadVariable) Metadata() *NomadVarMeta { - return NewNomadVarMeta(cv.nVar.Metadata()) -} - -type NomadVarItems map[string]NomadVarItem - -// Keys returns a sorted list of the Item map's keys. 
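// Illustrative sketch (hypothetical helper, same-package assumption): the
// template-facing helpers expose a Variable's Items in key-sorted order. The
// api.Variable fields used here are the same ones NewNomadVariable reads.
func demoNomadVarItems() {
	in := &api.Variable{Namespace: "default", Path: "nomad/jobs/redis"}
	in.Items = map[string]string{"user": "redis", "pass": "s3cret"}
	v := NewNomadVariable(in)
	fmt.Println(v.Items.Keys())   // [pass user]
	fmt.Println(v.Items.Values()) // [s3cret redis]
}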
-func (v NomadVarItems) Keys() []string { - out := make([]string, 0, len(v)) - for k := range v { - out = append(out, k) - } - sort.Strings(out) - return out -} - -// Values produces a key-sorted list of the Items map's values -func (v NomadVarItems) Values() []string { - out := make([]string, 0, len(v)) - for _, k := range v.Keys() { - out = append(out, v[k].String()) - } - return out -} - -// Tuples produces a key-sorted list of K,V tuple structs from the Items map's -// values -func (v NomadVarItems) Tuples() []struct{ K, V string } { - out := make([]struct{ K, V string }, 0, len(v)) - for _, k := range v.Keys() { - out = append(out, struct{ K, V string }{K: k, V: v[k].String()}) - } - return out -} - -// Metadata returns this item's parent's metadata -func (i NomadVarItems) Metadata() *NomadVarMeta { - for _, v := range i { - return v.parent.Metadata() - } - return nil -} - -// Parent returns the item's container object -func (i NomadVarItems) Parent() *NomadVariable { - for _, v := range i { - return v.Parent() - } - return nil -} - -func (i NomadVarItems) ItemsMap() map[string]interface{} { - if len(i) == 0 { - return nil - } - out := make(map[string]interface{}) - for k, v := range i { - out[k] = v.String() - } - return out -} - -// NomadVarItem enriches the basic string values in a api.Variable's Items -// map with additional helper funcs for formatting and access to its parent -// item. This enables us to have the template funcs start at the Items -// collection without the user having to delve to it themselves and to minimize -// the number of template funcs that we have to provide for coverage. -type NomadVarItem struct { - Key, Value string - parent *NomadVariable -} - -func (v NomadVarItem) String() string { return v.Value } -func (v NomadVarItem) Metadata() *NomadVarMeta { return v.parent.Metadata() } -func (v NomadVarItem) Parent() *NomadVariable { return v.parent } -func (v NomadVarItem) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf("%q", v.Value)), nil } - -// NomadVarMeta provides the same fields as api.VariableMetadata -// but aliases the times into a more template friendly alternative. -type NomadVarMeta struct { - Namespace, Path string - CreateIndex, ModifyIndex uint64 - CreateTime, ModifyTime nanoTime -} - -func (s NomadVarMeta) String() string { return s.Path } - -// nanoTime is the typical storage encoding for times in Nomad's backend. They -// are not pretty for consul-template consumption, so this gives us a type to -// add receivers on. -type nanoTime int64 - -func (t nanoTime) String() string { return fmt.Sprintf("%v", time.Unix(0, int64(t))) } -func (t nanoTime) Time() time.Time { return time.Unix(0, int64(t)) } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/set.go b/vendor/github.com/hashicorp/consul-template/dependency/set.go deleted file mode 100644 index d3a5df3a..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/set.go +++ /dev/null @@ -1,72 +0,0 @@ -package dependency - -import ( - "strings" - "sync" -) - -// Set is a dependency-specific set implementation. Relative ordering is -// preserved. -type Set struct { - once sync.Once - sync.RWMutex - list []string - set map[string]Dependency -} - -// Add adds a new element to the set if it does not already exist. 
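// Illustrative sketch (hypothetical helper): the Set deduplicates
// dependencies by their String() form while preserving insertion order;
// FileQuery is used here only as a convenient concrete Dependency.
func demoSet() {
	var s Set
	a, _ := NewFileQuery("/tmp/a")
	b, _ := NewFileQuery("/tmp/b")
	s.Add(a)
	s.Add(b)
	s.Add(a) // same String(), so this is a no-op
	fmt.Println(s.Len(), s.String())
	// Prints: 2 file(/tmp/a), file(/tmp/b)
}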
-func (s *Set) Add(d Dependency) bool { - s.init() - s.Lock() - defer s.Unlock() - if _, ok := s.set[d.String()]; !ok { - s.list = append(s.list, d.String()) - s.set[d.String()] = d - return true - } - return false -} - -// Get retrieves a single element from the set by name. -func (s *Set) Get(v string) Dependency { - s.RLock() - defer s.RUnlock() - return s.set[v] -} - -// List returns the insertion-ordered list of dependencies. -func (s *Set) List() []Dependency { - s.RLock() - defer s.RUnlock() - r := make([]Dependency, len(s.list)) - for i, k := range s.list { - r[i] = s.set[k] - } - return r -} - -// Len is the size of the set. -func (s *Set) Len() int { - s.RLock() - defer s.RUnlock() - return len(s.list) -} - -// String is a string representation of the set. -func (s *Set) String() string { - s.RLock() - defer s.RUnlock() - return strings.Join(s.list, ", ") -} - -func (s *Set) init() { - s.once.Do(func() { - if s.list == nil { - s.list = make([]string, 0, 8) - } - - if s.set == nil { - s.set = make(map[string]Dependency) - } - }) -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go deleted file mode 100644 index f0670467..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_agent_token.go +++ /dev/null @@ -1,117 +0,0 @@ -package dependency - -import ( - "log" - "os" - "time" - - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*VaultAgentTokenQuery)(nil) - -const ( - // VaultAgentTokenSleepTime is the amount of time to sleep between queries, since - // the fsnotify library is not compatible with solaris and other OSes yet. - VaultAgentTokenSleepTime = 15 * time.Second -) - -// VaultAgentTokenQuery is the dependency to Vault Agent token -type VaultAgentTokenQuery struct { - stopCh chan struct{} - path string - stat os.FileInfo -} - -// NewVaultAgentTokenQuery creates a new dependency. -func NewVaultAgentTokenQuery(path string) (*VaultAgentTokenQuery, error) { - return &VaultAgentTokenQuery{ - stopCh: make(chan struct{}, 1), - path: path, - }, nil -} - -// Fetch retrieves this dependency and returns the result or any errors that -// occur in the process. -func (d *VaultAgentTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - log.Printf("[TRACE] %s: READ %s", d, d.path) - - var token string - select { - case <-d.stopCh: - log.Printf("[TRACE] %s: stopped", d) - return "", nil, ErrStopped - case r := <-d.watch(d.stat): - if r.err != nil { - return "", nil, errors.Wrap(r.err, d.String()) - } - - log.Printf("[TRACE] %s: reported change", d) - - raw_token, err := os.ReadFile(d.path) - if err != nil { - return "", nil, errors.Wrap(err, d.String()) - } - d.stat = r.stat - token = string(raw_token) - } - - return respWithMetadata(token) -} - -// CanShare returns if this dependency is sharable. -func (d *VaultAgentTokenQuery) CanShare() bool { - return false -} - -// Stop halts the dependency's fetch function. -func (d *VaultAgentTokenQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *VaultAgentTokenQuery) String() string { - return "vault-agent.token" -} - -// Type returns the type of this dependency. 
-func (d *VaultAgentTokenQuery) Type() Type { - return TypeVault -} - -// watch watches the file for changes -func (d *VaultAgentTokenQuery) watch(lastStat os.FileInfo) <-chan *watchResult { - ch := make(chan *watchResult, 1) - - go func(lastStat os.FileInfo) { - for { - stat, err := os.Stat(d.path) - if err != nil { - select { - case <-d.stopCh: - return - case ch <- &watchResult{err: err}: - return - } - } - - changed := lastStat == nil || - lastStat.Size() != stat.Size() || - lastStat.ModTime() != stat.ModTime() - - if changed { - select { - case <-d.stopCh: - return - case ch <- &watchResult{stat: stat}: - return - } - } - - time.Sleep(VaultAgentTokenSleepTime) - } - }(lastStat) - - return ch -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go deleted file mode 100644 index 76096bdf..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_common.go +++ /dev/null @@ -1,372 +0,0 @@ -package dependency - -import ( - "encoding/json" - "fmt" - "log" - "math/rand" - "sync" - "time" - - "github.com/hashicorp/vault/api" -) - -var ( - // VaultDefaultLeaseDuration is the default lease duration in seconds. - VaultDefaultLeaseDuration time.Duration - onceVaultDefaultLeaseDuration sync.Once - VaultLeaseRenewalThreshold float64 - onceVaultLeaseRenewalThreshold sync.Once -) - -// Secret is the structure returned for every secret within Vault. -type Secret struct { - // The request ID that generated this response - RequestID string - - LeaseID string - LeaseDuration int - Renewable bool - - // Data is the actual contents of the secret. The format of the data - // is arbitrary and up to the secret backend. - Data map[string]interface{} - - // Warnings contains any warnings related to the operation. These - // are not issues that caused the command to fail, but that the - // client should be aware of. - Warnings []string - - // Auth, if non-nil, means that there was authentication information - // attached to this response. - Auth *SecretAuth - - // WrapInfo, if non-nil, means that the initial response was wrapped in the - // cubbyhole of the given token (which has a TTL of the given number of - // seconds) - WrapInfo *SecretWrapInfo -} - -// SecretAuth is the structure containing auth information if we have it. -type SecretAuth struct { - ClientToken string - Accessor string - Policies []string - Metadata map[string]string - - LeaseDuration int - Renewable bool -} - -// SecretWrapInfo contains wrapping information if we have it. If what is -// contained is an authentication token, the accessor for the token will be -// available in WrappedAccessor. 
-type SecretWrapInfo struct { - Token string - TTL int - CreationTime time.Time - WrappedAccessor string -} - -type renewer interface { - Dependency - stopChan() chan struct{} - secrets() (*Secret, *api.Secret) -} - -func renewSecret(clients *ClientSet, d renewer) error { - log.Printf("[TRACE] %s: starting renewer", d) - - secret, vaultSecret := d.secrets() - renewer, err := clients.Vault().NewRenewer(&api.RenewerInput{ - Secret: vaultSecret, - }) - if err != nil { - return err - } - go renewer.Renew() - defer renewer.Stop() - - for { - select { - case err := <-renewer.DoneCh(): - if err != nil { - log.Printf("[WARN] %s: failed to renew: %s", d, err) - } - log.Printf("[WARN] %s: renewer done (maybe the lease expired)", d) - return nil - case renewal := <-renewer.RenewCh(): - log.Printf("[TRACE] %s: successfully renewed", d) - printVaultWarnings(d, renewal.Secret.Warnings) - updateSecret(secret, renewal.Secret) - case <-d.stopChan(): - return ErrStopped - } - } -} - -// leaseCheckWait accepts a secret and returns the recommended amount of -// time to sleep. -func leaseCheckWait(s *Secret) time.Duration { - // Handle whether this is an auth or a regular secret. - base := s.LeaseDuration - if s.Auth != nil && s.Auth.LeaseDuration > 0 { - base = s.Auth.LeaseDuration - } - - // Handle if this is a certificate with no lease - if _, ok := s.Data["certificate"]; ok && s.LeaseID == "" { - if expInterface, ok := s.Data["expiration"]; ok { - if expData, err := expInterface.(json.Number).Int64(); err == nil { - base = int(expData - time.Now().Unix()) - log.Printf("[DEBUG] Found certificate and set lease duration to %d seconds", base) - } - } - } - - // Handle if this is an AppRole secret_id with no lease - if _, ok := s.Data["secret_id"]; ok && s.LeaseID == "" { - if expInterface, ok := s.Data["secret_id_ttl"]; ok { - if ttlData, err := expInterface.(json.Number).Int64(); err == nil && ttlData > 0 { - base = int(ttlData) + 1 - log.Printf("[DEBUG] Found approle secret_id and non-zero secret_id_ttl, setting lease duration to %d seconds", base) - } - } - } - - // Handle if this is a secret with a rotation period. If this is a rotating secret, - // the rotating secret's TTL will be the duration to sleep before rendering the new secret. - var rotatingSecret bool - if _, ok := s.Data["rotation_period"]; ok && s.LeaseID == "" { - if ttlInterface, ok := s.Data["ttl"]; ok { - if ttlData, err := ttlInterface.(json.Number).Int64(); err == nil { - log.Printf("[DEBUG] Found rotation_period and set lease duration to %d seconds", ttlData) - // Add a second for cushion - base = int(ttlData) + 1 - rotatingSecret = true - } - } - } - - // Ensure we have a lease duration, since sometimes this can be zero. - if base <= 0 { - base = int(VaultDefaultLeaseDuration.Seconds()) - } - - // Convert to float seconds. - sleep := float64(time.Duration(base) * time.Second) - - if vaultSecretRenewable(s) { - // Renew at 1/3 the remaining lease. This will give us an opportunity to retry - // at least one more time should the first renewal fail. - sleep = sleep / 3.0 - - // Use some randomness so many clients do not hit Vault simultaneously. - sleep = sleep * (rand.Float64() + 1) / 2.0 - } else if !rotatingSecret { - // If the secret doesn't have a rotation period, this is a non-renewable leased - // secret. - // For non-renewable leases set the renew duration to use much of the secret - // lease as possible. 
Use a stagger over the configured threshold - // fraction of the lease duration so that many clients do not hit - // Vault simultaneously. - finalFraction := VaultLeaseRenewalThreshold + (rand.Float64()-0.5)*0.1 - if finalFraction >= 1.0 || finalFraction <= 0.0 { - // If the fraction randomly winds up outside of (0.0-1.0), clamp - // back down to the VaultLeaseRenewalThreshold provided by the user, - // since a) the user picked that value, so they should be - // comfortable with it, and b) it should not skew the staggering too - // much - finalFraction = VaultLeaseRenewalThreshold - } - sleep = sleep * finalFraction - } - - return time.Duration(sleep) -} - -// printVaultWarnings prints warnings for a given dependency. -func printVaultWarnings(d Dependency, warnings []string) { - for _, w := range warnings { - log.Printf("[WARN] %s: %s", d, w) - } -} - -// vaultSecretRenewable determines if the given secret is renewable. -func vaultSecretRenewable(s *Secret) bool { - if s.Auth != nil { - return s.Auth.Renewable - } - return s.Renewable -} - -// transformSecret transforms an api secret into our secret. This does not deep -// copy underlying deep data structures, so it's not safe to modify the vault -// secret as that may modify the data in the transformed secret. -func transformSecret(theirs *api.Secret) *Secret { - var ours Secret - updateSecret(&ours, theirs) - return &ours -} - -// updateSecret updates our secret with the new data from the api, careful to -// not overwrite missing data. Renewals don't include the original secret, and -// we don't want to delete that data accidentally. -func updateSecret(ours *Secret, theirs *api.Secret) { - if theirs.RequestID != "" { - ours.RequestID = theirs.RequestID - } - - if theirs.LeaseID != "" { - ours.LeaseID = theirs.LeaseID - } - - if theirs.LeaseDuration != 0 { - ours.LeaseDuration = theirs.LeaseDuration - } - - if theirs.Renewable { - ours.Renewable = theirs.Renewable - } - - if len(theirs.Data) != 0 { - ours.Data = theirs.Data - } - - if len(theirs.Warnings) != 0 { - ours.Warnings = theirs.Warnings - } - - if theirs.Auth != nil { - if ours.Auth == nil { - ours.Auth = &SecretAuth{} - } - - if theirs.Auth.ClientToken != "" { - ours.Auth.ClientToken = theirs.Auth.ClientToken - } - - if theirs.Auth.Accessor != "" { - ours.Auth.Accessor = theirs.Auth.Accessor - } - - if len(theirs.Auth.Policies) != 0 { - ours.Auth.Policies = theirs.Auth.Policies - } - - if len(theirs.Auth.Metadata) != 0 { - ours.Auth.Metadata = theirs.Auth.Metadata - } - - if theirs.Auth.LeaseDuration != 0 { - ours.Auth.LeaseDuration = theirs.Auth.LeaseDuration - } - - if theirs.Auth.Renewable { - ours.Auth.Renewable = theirs.Auth.Renewable - } - } - - if theirs.WrapInfo != nil { - if ours.WrapInfo == nil { - ours.WrapInfo = &SecretWrapInfo{} - } - - if theirs.WrapInfo.Token != "" { - ours.WrapInfo.Token = theirs.WrapInfo.Token - } - - if theirs.WrapInfo.TTL != 0 { - ours.WrapInfo.TTL = theirs.WrapInfo.TTL - } - - if !theirs.WrapInfo.CreationTime.IsZero() { - ours.WrapInfo.CreationTime = theirs.WrapInfo.CreationTime - } - - if theirs.WrapInfo.WrappedAccessor != "" { - ours.WrapInfo.WrappedAccessor = theirs.WrapInfo.WrappedAccessor - } - } -} - -func isKVv2(client *api.Client, path string) (string, bool, error) { - // We don't want to use a wrapping call here so save any custom value and - // restore after - currentWrappingLookupFunc := client.CurrentWrappingLookupFunc() - client.SetWrappingLookupFunc(nil) - defer client.SetWrappingLookupFunc(currentWrappingLookupFunc) - 
currentOutputCurlString := client.OutputCurlString() - client.SetOutputCurlString(false) - defer client.SetOutputCurlString(currentOutputCurlString) - - r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path) - resp, err := client.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if err != nil { - // If we get a 404 we are using an older version of vault, default to - // version 1 - if resp != nil && resp.StatusCode == 404 { - return "", false, nil - } - - // anonymous requests may fail to access /sys/internal/ui path - // Vault v1.1.3 returns 500 status code but may return 4XX in future - if client.Token() == "" { - return "", false, nil - } - - return "", false, err - } - - secret, err := api.ParseSecret(resp.Body) - if err != nil { - return "", false, err - } - if secret == nil { - return "", false, fmt.Errorf("secret at path %s does not exist", path) - } - var mountPath string - if mountPathRaw, ok := secret.Data["path"]; ok { - mountPath = mountPathRaw.(string) - } - var mountType string - if mountTypeRaw, ok := secret.Data["type"]; ok { - mountType = mountTypeRaw.(string) - } - options := secret.Data["options"] - if options == nil { - return mountPath, false, nil - } - versionRaw := options.(map[string]interface{})["version"] - if versionRaw == nil { - return mountPath, false, nil - } - version := versionRaw.(string) - switch version { - case "", "1": - return mountPath, false, nil - case "2": - return mountPath, mountType == "kv", nil - } - - return mountPath, false, nil -} - -// Make sure to only set VaultDefaultLeaseDuration once -func SetVaultDefaultLeaseDuration(t time.Duration) { - set := func() { - VaultDefaultLeaseDuration = t - } - onceVaultDefaultLeaseDuration.Do(set) -} - -// Make sure to only set VaultLeaseRenewalThreshold once -func SetVaultLeaseRenewalThreshold(f float64) { - set := func() { - VaultLeaseRenewalThreshold = f - } - onceVaultLeaseRenewalThreshold.Do(set) -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go deleted file mode 100644 index d5c71c1e..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_list.go +++ /dev/null @@ -1,153 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "net/url" - "path" - "sort" - "strings" - "time" - - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*VaultListQuery)(nil) - -// VaultListQuery is the dependency to Vault for a secret -type VaultListQuery struct { - stopCh chan struct{} - - path string -} - -// NewVaultListQuery creates a new datacenter dependency. -func NewVaultListQuery(s string) (*VaultListQuery, error) { - s = strings.TrimSpace(s) - s = strings.Trim(s, "/") - if s == "" { - return nil, fmt.Errorf("vault.list: invalid format: %q", s) - } - - return &VaultListQuery{ - stopCh: make(chan struct{}, 1), - path: s, - }, nil -} - -// Fetch queries the Vault API -func (d *VaultListQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - opts = opts.Merge(&QueryOptions{}) - - // If this is not the first query, poll to simulate blocking-queries. - if opts.WaitIndex != 0 { - dur := VaultDefaultLeaseDuration - log.Printf("[TRACE] %s: long polling for %s", d, dur) - - select { - case <-d.stopCh: - return nil, nil, ErrStopped - case <-time.After(dur): - } - } - - secretsPath := d.path - - // Checking secret engine version. 
If it's v2, we should shim /metadata/ - // to secret path if necessary. - mountPath, isV2, _ := isKVv2(clients.Vault(), secretsPath) - if isV2 { - secretsPath = shimKvV2ListPath(secretsPath, mountPath) - } - - // If we got this far, we either didn't have a secret to renew, the secret was - // not renewable, or the renewal failed, so attempt a fresh list. - log.Printf("[TRACE] %s: LIST %s", d, &url.URL{ - Path: "/v1/" + secretsPath, - RawQuery: opts.String(), - }) - secret, err := clients.Vault().Logical().List(secretsPath) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - var result []string - - // The secret could be nil if it does not exist. - if secret == nil || secret.Data == nil { - log.Printf("[TRACE] %s: no data", d) - return respWithMetadata(result) - } - - // This is a weird thing that happened once... - keys, ok := secret.Data["keys"] - if !ok { - log.Printf("[TRACE] %s: no keys", d) - return respWithMetadata(result) - } - - list, ok := keys.([]interface{}) - if !ok { - log.Printf("[TRACE] %s: not list", d) - return nil, nil, fmt.Errorf("%s: unexpected response", d) - } - - for _, v := range list { - typed, ok := v.(string) - if !ok { - return nil, nil, fmt.Errorf("%s: non-string in list", d) - } - result = append(result, typed) - } - sort.Strings(result) - - log.Printf("[TRACE] %s: returned %d results", d, len(result)) - - return respWithMetadata(result) -} - -// CanShare returns if this dependency is shareable. -func (d *VaultListQuery) CanShare() bool { - return false -} - -// Stop halts the given dependency's fetch. -func (d *VaultListQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *VaultListQuery) String() string { - return fmt.Sprintf("vault.list(%s)", d.path) -} - -// Type returns the type of this dependency. -func (d *VaultListQuery) Type() Type { - return TypeVault -} - -// shimKvV2ListPath aligns the supported legacy path to KV v2 specs by inserting -// /metadata/ into the path for listing secrets. Paths with /metadata/ are not modified. -func shimKvV2ListPath(rawPath, mountPath string) string { - mountPath = strings.TrimSuffix(mountPath, "/") - - if strings.HasPrefix(rawPath, path.Join(mountPath, "metadata")) { - // It doesn't need modifying. - return rawPath - } - - switch { - case rawPath == mountPath: - return path.Join(mountPath, "metadata") - default: - rawPath = strings.TrimPrefix(rawPath, mountPath) - return path.Join(mountPath, "metadata", rawPath) - } -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_pki.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_pki.go deleted file mode 100644 index 3ecdce63..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_pki.go +++ /dev/null @@ -1,234 +0,0 @@ -package dependency - -import ( - "bytes" - "crypto/x509" - "encoding/pem" - "fmt" - "math/rand" - "net/url" - "os" - "strings" - "time" - - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*VaultPKIQuery)(nil) - -// Return type containing PEMs as strings -type PemEncoded struct{ Cert, Key, CA string } - -// a wrapper to mimic v2 secrets Data wrapper -func (p PemEncoded) Data() PemEncoded { - return p -} - -// VaultPKIQuery is the dependency to Vault for a secret -type VaultPKIQuery struct { - stopCh chan struct{} - sleepCh chan time.Duration - - pkiPath string - data map[string]interface{} - filePath string -} - -// NewVaultReadQuery creates a new datacenter dependency. 
-func NewVaultPKIQuery(urlpath, filepath string, data map[string]interface{}) (*VaultPKIQuery, error) { - urlpath = strings.TrimSpace(urlpath) - urlpath = strings.Trim(urlpath, "/") - if urlpath == "" { - return nil, fmt.Errorf("vault.read: invalid format: %q", urlpath) - } - - secretURL, err := url.Parse(urlpath) - if err != nil { - return nil, err - } - - return &VaultPKIQuery{ - stopCh: make(chan struct{}, 1), - sleepCh: make(chan time.Duration, 1), - pkiPath: secretURL.Path, - data: data, - filePath: filepath, - }, nil -} - -// Fetch queries the Vault API -func (d *VaultPKIQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - select { - case dur := <-d.sleepCh: - time.Sleep(dur) - default: - } - - needsRenewal := fmt.Errorf("needs renewal") - getPEMs := func(renew bool) (PemEncoded, error) { - rawPems, err := os.ReadFile(d.filePath) - if renew || err != nil || len(rawPems) == 0 { - rawPems, err = d.fetchPEMs(clients) - // no need to write cert to file as it is the template dest - } - if err != nil { - return PemEncoded{}, err - } - - encPems, cert, err := pemsCert(rawPems) - if err != nil { - return encPems, err - } - - if sleepFor, ok := goodFor(cert); ok { - d.sleepCh <- sleepFor - return encPems, nil - } - return encPems, needsRenewal - } - - encPems, err := getPEMs(false) - switch err { - case nil: - case needsRenewal: - encPems, err = getPEMs(true) - if err != nil { - return PemEncoded{}, nil, err - } - default: - return PemEncoded{}, nil, err - } - return respWithMetadata(encPems) -} - -// returns time left in ~90% of the original lease and a boolean -// that returns false if cert needs renewing, true otherwise -func goodFor(cert *x509.Certificate) (time.Duration, bool) { - // If we got called with a cert that doesn't exist, just say there's no - // time left, and it needs to be renewed - if cert == nil { - return 0, false - } - // These are all int64's with Seconds since the Epoch, handy for the math - start, end := cert.NotBefore.Unix(), cert.NotAfter.Unix() - now := time.Now().UTC().Unix() - if end <= now { // already expired - return 0, false - } - lifespan := end - start // full ttl of cert - duration := end - now // duration remaining - gooddur := (duration * 9) / 10 // 90% of duration - mindur := (lifespan / 10) // 10% of lifespan - if gooddur <= mindur { - return 0, false // almost expired, get a new one - } - if gooddur > 100 { // 100 seconds - // add jitter if big enough for it to matter - r := rand.New(rand.NewSource(time.Now().UnixNano())) - // between 87% and 93% - gooddur = gooddur + ((gooddur / 100) * int64(r.Intn(6)-3)) - } - sleepFor := time.Duration(gooddur * 1e9) // basically: gooddur*time.Second - return sleepFor, true -} - -// loops through all pem encoded blocks in the byte stream -// returning the CA, Certificate and Private Key PEM strings -// also returns the cert for the Certificate as we have it and need it -func pemsCert(encoded []byte) (PemEncoded, *x509.Certificate, error) { - var block *pem.Block - var cert *x509.Certificate - var encPems PemEncoded - var aPem []byte - for { - aPem, encoded = nextPem(encoded) - // scan, find and parse PEM blocks - block, _ = pem.Decode(aPem) - switch { - case block == nil: // end of scan, no more PEMs found - return encPems, cert, nil - case strings.HasSuffix(block.Type, "PRIVATE KEY"): - // PKCS#1 and PKCS#8 matching to find private key - encPems.Key = string(pem.EncodeToMemory(block)) - continue - 
} - // CERTIFICATE PEM blocks (Cert and CA) are left - maybeCert, err := x509.ParseCertificate(block.Bytes) - switch { - case err != nil: - return PemEncoded{}, nil, err - case maybeCert.IsCA: - encPems.CA = string(pem.EncodeToMemory(block)) - default: // the certificate - cert = maybeCert - encPems.Cert = string(pem.EncodeToMemory(block)) - } - } -} - -// find the next PEM in the byte stream -func nextPem(encoded []byte) (aPem []byte, theRest []byte) { - start := bytes.Index(encoded, []byte("-----BEGIN")) - if start >= 0 { // finds the PEM and pulls it to decode - encoded = encoded[start:] // clip pre-pem junk - // find the end - end := bytes.Index(encoded, []byte("-----END")) + 8 - end = end + bytes.Index(encoded[end:], []byte("-----")) + 5 - // the PEM padded with newlines (what pem.Decode likes) - aPem = append([]byte("\n"), encoded[:end]...) - aPem = append(aPem, []byte("\n")...) - theRest = encoded[end:] // the rest - } - return aPem, theRest -} - -// Vault call to fetch the PKI Cert PEM data -func (d *VaultPKIQuery) fetchPEMs(clients *ClientSet) ([]byte, error) { - vaultSecret, err := clients.Vault().Logical().Write(d.pkiPath, d.data) - switch { - case err != nil: - return nil, errors.Wrap(err, d.String()) - case vaultSecret == nil: - return nil, fmt.Errorf("no secret exists at %s", d.pkiPath) - } - printVaultWarnings(d, vaultSecret.Warnings) - pems := bytes.Buffer{} - for _, v := range vaultSecret.Data { - switch v := v.(type) { - case string: - pems.WriteString(v + "\n") - } - } - - return pems.Bytes(), nil -} - -func (d *VaultPKIQuery) stopChan() chan struct{} { - return d.stopCh -} - -// CanShare returns if this dependency is shareable. -func (d *VaultPKIQuery) CanShare() bool { - return false -} - -// Stop halts the given dependency's fetch. -func (d *VaultPKIQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *VaultPKIQuery) String() string { - return fmt.Sprintf("vault.pki(%s->%s)", d.pkiPath, d.filePath) -} - -// Type returns the type of this dependency. -func (d *VaultPKIQuery) Type() Type { - return TypeVault -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go deleted file mode 100644 index 9e20d7cd..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_read.go +++ /dev/null @@ -1,199 +0,0 @@ -package dependency - -import ( - "fmt" - "log" - "net/url" - "path" - "strings" - "time" - - "github.com/hashicorp/vault/api" - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*VaultReadQuery)(nil) - -// VaultReadQuery is the dependency to Vault for a secret -type VaultReadQuery struct { - stopCh chan struct{} - sleepCh chan time.Duration - - rawPath string - queryValues url.Values - secret *Secret - isKVv2 *bool - secretPath string - - // vaultSecret is the actual Vault secret which we are renewing - vaultSecret *api.Secret -} - -// NewVaultReadQuery creates a new datacenter dependency. 
-func NewVaultReadQuery(s string) (*VaultReadQuery, error) { - s = strings.TrimSpace(s) - s = strings.Trim(s, "/") - if s == "" { - return nil, fmt.Errorf("vault.read: invalid format: %q", s) - } - - secretURL, err := url.Parse(s) - if err != nil { - return nil, err - } - - return &VaultReadQuery{ - stopCh: make(chan struct{}, 1), - sleepCh: make(chan time.Duration, 1), - rawPath: secretURL.Path, - queryValues: secretURL.Query(), - }, nil -} - -// Fetch queries the Vault API -func (d *VaultReadQuery) Fetch(clients *ClientSet, opts *QueryOptions, -) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - select { - case dur := <-d.sleepCh: - select { - case <-time.After(dur): - break - case <-d.stopCh: - return nil, nil, ErrStopped - } - default: - } - - firstRun := d.secret == nil - - if !firstRun && vaultSecretRenewable(d.secret) { - err := renewSecret(clients, d) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - } - - err := d.fetchSecret(clients, opts) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - if !vaultSecretRenewable(d.secret) { - dur := leaseCheckWait(d.secret) - log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur) - d.sleepCh <- dur - } - - return respWithMetadata(d.secret) -} - -func (d *VaultReadQuery) fetchSecret(clients *ClientSet, opts *QueryOptions, -) error { - opts = opts.Merge(&QueryOptions{}) - vaultSecret, err := d.readSecret(clients, opts) - if err == nil { - printVaultWarnings(d, vaultSecret.Warnings) - d.vaultSecret = vaultSecret - // the cloned secret which will be exposed to the template - d.secret = transformSecret(vaultSecret) - } - return err -} - -func (d *VaultReadQuery) stopChan() chan struct{} { - return d.stopCh -} - -func (d *VaultReadQuery) secrets() (*Secret, *api.Secret) { - return d.secret, d.vaultSecret -} - -// CanShare returns if this dependency is shareable. -func (d *VaultReadQuery) CanShare() bool { - return false -} - -// Stop halts the given dependency's fetch. -func (d *VaultReadQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *VaultReadQuery) String() string { - if v := d.queryValues["version"]; len(v) > 0 { - return fmt.Sprintf("vault.read(%s.v%s)", d.rawPath, v[0]) - } - return fmt.Sprintf("vault.read(%s)", d.rawPath) -} - -// Type returns the type of this dependency. -func (d *VaultReadQuery) Type() Type { - return TypeVault -} - -func (d *VaultReadQuery) readSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) { - vaultClient := clients.Vault() - - // Check whether this secret refers to a KV v2 entry if we haven't yet. 
- if d.isKVv2 == nil { - mountPath, isKVv2, err := isKVv2(vaultClient, d.rawPath) - if err != nil { - log.Printf("[WARN] %s: failed to check if %s is KVv2, "+ - "assume not: %s", d, d.rawPath, err) - isKVv2 = false - d.secretPath = d.rawPath - } else if isKVv2 { - d.secretPath = shimKVv2Path(d.rawPath, mountPath) - } else { - d.secretPath = d.rawPath - } - d.isKVv2 = &isKVv2 - } - - queryString := d.queryValues.Encode() - log.Printf("[TRACE] %s: GET %s", d, &url.URL{ - Path: "/v1/" + d.secretPath, - RawQuery: queryString, - }) - vaultSecret, err := vaultClient.Logical().ReadWithData(d.secretPath, - d.queryValues) - if err != nil { - return nil, errors.Wrap(err, d.String()) - } - if vaultSecret == nil || deletedKVv2(vaultSecret) { - return nil, fmt.Errorf("no secret exists at %s", d.secretPath) - } - return vaultSecret, nil -} - -func deletedKVv2(s *api.Secret) bool { - switch md := s.Data["metadata"].(type) { - case map[string]interface{}: - return md["deletion_time"] != "" - } - return false -} - -// shimKVv2Path aligns the supported legacy path to KV v2 specs by inserting -// /data/ into the path for reading secrets. Paths for metadata are not modified. -func shimKVv2Path(rawPath, mountPath string) string { - switch { - case rawPath == mountPath, rawPath == strings.TrimSuffix(mountPath, "/"): - return path.Join(mountPath, "data") - default: - p := strings.TrimPrefix(rawPath, mountPath) - - // Only add /data/ prefix to the path if neither /data/ or /metadata/ are - // present. - if strings.HasPrefix(p, "data/") || strings.HasPrefix(p, "metadata/") { - return rawPath - } - return path.Join(mountPath, "data", p) - } -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go deleted file mode 100644 index 8c687d86..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_token.go +++ /dev/null @@ -1,79 +0,0 @@ -package dependency - -import ( - "github.com/hashicorp/vault/api" - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*VaultTokenQuery)(nil) - -// VaultTokenQuery is the dependency to Vault for a secret -type VaultTokenQuery struct { - stopCh chan struct{} - secret *Secret - vaultSecret *api.Secret -} - -// NewVaultTokenQuery creates a new dependency. -func NewVaultTokenQuery(token string) (*VaultTokenQuery, error) { - vaultSecret := &api.Secret{ - Auth: &api.SecretAuth{ - ClientToken: token, - Renewable: true, - LeaseDuration: 1, - }, - } - return &VaultTokenQuery{ - stopCh: make(chan struct{}, 1), - vaultSecret: vaultSecret, - secret: transformSecret(vaultSecret), - }, nil -} - -// Fetch queries the Vault API -func (d *VaultTokenQuery) Fetch(clients *ClientSet, opts *QueryOptions, -) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - - if vaultSecretRenewable(d.secret) { - err := renewSecret(clients, d) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - } - - return nil, nil, ErrLeaseExpired -} - -func (d *VaultTokenQuery) stopChan() chan struct{} { - return d.stopCh -} - -func (d *VaultTokenQuery) secrets() (*Secret, *api.Secret) { - return d.secret, d.vaultSecret -} - -// CanShare returns if this dependency is shareable. -func (d *VaultTokenQuery) CanShare() bool { - return false -} - -// Stop halts the dependency's fetch function. -func (d *VaultTokenQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. 
-func (d *VaultTokenQuery) String() string { - return "vault.token" -} - -// Type returns the type of this dependency. -func (d *VaultTokenQuery) Type() Type { - return TypeVault -} diff --git a/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go b/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go deleted file mode 100644 index 5ad07c77..00000000 --- a/vendor/github.com/hashicorp/consul-template/dependency/vault_write.go +++ /dev/null @@ -1,176 +0,0 @@ -package dependency - -import ( - "crypto/sha1" - "fmt" - "io" - "log" - "net/url" - "sort" - "strings" - "time" - - "github.com/hashicorp/vault/api" - "github.com/pkg/errors" -) - -// Ensure implements -var _ Dependency = (*VaultWriteQuery)(nil) - -// VaultWriteQuery is the dependency to Vault for a secret -type VaultWriteQuery struct { - stopCh chan struct{} - sleepCh chan time.Duration - - path string - data map[string]interface{} - dataHash string - secret *Secret - - // vaultSecret is the actual Vault secret which we are renewing - vaultSecret *api.Secret -} - -// NewVaultWriteQuery creates a new datacenter dependency. -func NewVaultWriteQuery(s string, d map[string]interface{}) (*VaultWriteQuery, error) { - s = strings.TrimSpace(s) - s = strings.Trim(s, "/") - if s == "" { - return nil, fmt.Errorf("vault.write: invalid format: %q", s) - } - - return &VaultWriteQuery{ - stopCh: make(chan struct{}, 1), - sleepCh: make(chan time.Duration, 1), - path: s, - data: d, - dataHash: sha1Map(d), - }, nil -} - -// Fetch queries the Vault API -func (d *VaultWriteQuery) Fetch(clients *ClientSet, opts *QueryOptions, -) (interface{}, *ResponseMetadata, error) { - select { - case <-d.stopCh: - return nil, nil, ErrStopped - default: - } - select { - case dur := <-d.sleepCh: - time.Sleep(dur) - default: - } - - firstRun := d.secret == nil - - if !firstRun && vaultSecretRenewable(d.secret) { - err := renewSecret(clients, d) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - } - - opts = opts.Merge(&QueryOptions{}) - vaultSecret, err := d.writeSecret(clients, opts) - if err != nil { - return nil, nil, errors.Wrap(err, d.String()) - } - - // vaultSecret == nil when writing to KVv1 engines - if vaultSecret == nil { - return respWithMetadata(d.secret) - } - - printVaultWarnings(d, vaultSecret.Warnings) - d.vaultSecret = vaultSecret - // cloned secret which will be exposed to the template - d.secret = transformSecret(vaultSecret) - - if !vaultSecretRenewable(d.secret) { - dur := leaseCheckWait(d.secret) - log.Printf("[TRACE] %s: non-renewable secret, set sleep for %s", d, dur) - d.sleepCh <- dur - } - - return respWithMetadata(d.secret) -} - -// meet renewer interface -func (d *VaultWriteQuery) stopChan() chan struct{} { - return d.stopCh -} - -func (d *VaultWriteQuery) secrets() (*Secret, *api.Secret) { - return d.secret, d.vaultSecret -} - -// CanShare returns if this dependency is shareable. -func (d *VaultWriteQuery) CanShare() bool { - return false -} - -// Stop halts the given dependency's fetch. -func (d *VaultWriteQuery) Stop() { - close(d.stopCh) -} - -// String returns the human-friendly version of this dependency. -func (d *VaultWriteQuery) String() string { - return fmt.Sprintf("vault.write(%s -> %s)", d.path, d.dataHash) -} - -// Type returns the type of this dependency. -func (d *VaultWriteQuery) Type() Type { - return TypeVault -} - -// sha1Map returns the sha1 hash of the data in the map. 
The reason this data is -// hashed is because it appears in the output and could contain sensitive -// information. -func sha1Map(m map[string]interface{}) string { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - h := sha1.New() - for _, k := range keys { - io.WriteString(h, fmt.Sprintf("%s=%q", k, m[k])) - } - - return fmt.Sprintf("%.4x", h.Sum(nil)) -} - -func (d *VaultWriteQuery) printWarnings(warnings []string) { - for _, w := range warnings { - log.Printf("[WARN] %s: %s", d, w) - } -} - -func (d *VaultWriteQuery) writeSecret(clients *ClientSet, opts *QueryOptions) (*api.Secret, error) { - log.Printf("[TRACE] %s: PUT %s", d, &url.URL{ - Path: "/v1/" + d.path, - RawQuery: opts.String(), - }) - - path := d.path - data := d.data - mountPath, isv2, _ := isKVv2(clients.Vault(), path) - if isv2 { - path = shimKVv2Path(path, mountPath) - data = map[string]interface{}{"data": d.data} - } - - vaultSecret, err := clients.Vault().Logical().Write(path, data) - if err != nil { - return nil, errors.Wrap(err, d.String()) - } - // vaultSecret is always nil when KVv1 engine (isv2==false) - if isv2 && vaultSecret == nil { - return nil, fmt.Errorf("no secret exists at %s", d.path) - } - - return vaultSecret, nil -} diff --git a/vendor/github.com/hashicorp/consul-template/version/version.go b/vendor/github.com/hashicorp/consul-template/version/version.go deleted file mode 100644 index 880f2eae..00000000 --- a/vendor/github.com/hashicorp/consul-template/version/version.go +++ /dev/null @@ -1,16 +0,0 @@ -package version - -import "fmt" - -const ( - Version = "0.29.5" - VersionPrerelease = "" // "-dev", "-beta", "-rc1", etc. (include dash) -) - -var ( - Name string = "consul-template" - GitCommit string - - HumanVersion = fmt.Sprintf("%s v%s%s (%s)", - Name, Version, VersionPrerelease, GitCommit) -) diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/hashicorp/cronexpr/LICENSE similarity index 99% rename from vendor/github.com/containerd/cgroups/LICENSE rename to vendor/github.com/hashicorp/cronexpr/LICENSE index 261eeb9e..d6456956 100644 --- a/vendor/github.com/containerd/cgroups/LICENSE +++ b/vendor/github.com/hashicorp/cronexpr/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index e4cd8edd..89d26c9b 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -261,7 +261,6 @@ func needsQuoting(str string) bool { // 2. Color the whole log line, based on the level. // 3. Color only the header (level) part of the log line. // 4. Color both the header and fields of the log line. -// func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { if !l.disableTime { @@ -845,6 +844,11 @@ func (l *intLogger) SetLevel(level Level) { atomic.StoreInt32(l.level, int32(level)) } +// Returns the current level +func (l *intLogger) GetLevel() Level { + return Level(atomic.LoadInt32(l.level)) +} + // Create a *log.Logger that will send it's data through this Logger. This // allows packages that expect to be using the standard library log to actually // use this logger. 
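[editor's note, not part of the patch] The hunk above pulls in go-hclog's new `GetLevel` accessor on `*intLogger`, and the following hunks add the matching `GetLevel()` method to the `Logger` interface (with the null logger returning `NoLevel`). A minimal sketch of how a caller might use the accessor is below; the logger name and the level comparison are illustrative assumptions, while `hclog.New`, `LoggerOptions`, `SetLevel`, and the level constants are existing go-hclog API.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Create a logger at Info level; "example" is a hypothetical name.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "example",
		Level: hclog.Info,
	})

	// Callers could already change the level on the fly...
	logger.SetLevel(hclog.Debug)

	// ...and with the GetLevel method added in this dependency bump they
	// can read the effective level back instead of tracking it themselves.
	if logger.GetLevel() == hclog.Debug {
		fmt.Println("debug logging is enabled")
	}
}
```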
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 50dee822..3cdb2837 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -198,6 +198,9 @@ type Logger interface { // implementation cannot update the level on the fly, it should no-op. SetLevel(level Level) + // Returns the current level + GetLevel() Level + // Return a value that conforms to the stdlib log.Logger interface StandardLogger(opts *StandardLoggerOptions) *log.Logger diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index bc14f770..55e89dd3 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -49,6 +49,8 @@ func (l *nullLogger) ResetNamed(name string) Logger { return l } func (l *nullLogger) SetLevel(level Level) {} +func (l *nullLogger) GetLevel() Level { return NoLevel } + func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { return log.New(l.StandardWriter(opts), "", log.LstdFlags) } diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md index 4c2c0e64..dbb20c96 100644 --- a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md @@ -1,3 +1,13 @@ +## v1.4.10 + +BUG FIXES: + +* additional notes: ensure to close files [GH-241](https://github.com/hashicorp/go-plugin/pull/241)] + +ENHANCEMENTS: + +* deps: Remove direct dependency on golang.org/x/net [GH-240](https://github.com/hashicorp/go-plugin/pull/240)] + ## v1.4.9 ENHANCEMENTS: diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 39391f24..50baee06 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -4,8 +4,9 @@ that has been in use by HashiCorp tooling for over 4 years. While initially created for [Packer](https://www.packer.io), it is additionally in use by [Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), -[Vault](https://www.vaultproject.io), and -[Boundary](https://www.boundaryproject.io). +[Vault](https://www.vaultproject.io), +[Boundary](https://www.boundaryproject.io), +and [Waypoint](https://www.waypointproject.io). While the plugin system is over RPC, it is currently only designed to work over a local [reliable] network. 
Plugins over a real network are not supported diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index b0592cb5..6454d426 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -4,6 +4,7 @@ package plugin import ( + "context" "crypto/tls" "fmt" "math" @@ -11,7 +12,6 @@ import ( "time" "github.com/hashicorp/go-plugin/internal/plugin" - "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/health/grpc_health_v1" diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go index 6bf10385..303b63e4 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -8,7 +8,7 @@ import fmt "fmt" import math "math" import ( - context "golang.org/x/net/context" + context "context" grpc "google.golang.org/grpc" ) diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go index 3e39da95..982fca0a 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -8,7 +8,7 @@ import fmt "fmt" import math "math" import ( - context "golang.org/x/net/context" + context "context" grpc "google.golang.org/grpc" ) diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go index c8f94921..bdef71b8 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -9,7 +9,7 @@ import math "math" import empty "github.com/golang/protobuf/ptypes/empty" import ( - context "golang.org/x/net/context" + context "context" grpc "google.golang.org/grpc" ) diff --git a/vendor/github.com/hashicorp/go-plugin/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/notes_unix.go index 9a0e5c50..97345575 100644 --- a/vendor/github.com/hashicorp/go-plugin/notes_unix.go +++ b/vendor/github.com/hashicorp/go-plugin/notes_unix.go @@ -53,10 +53,13 @@ func additionalNotesAboutCommand(path string) string { } if elfFile, err := elf.Open(path); err == nil { + defer elfFile.Close() notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) } else if machoFile, err := macho.Open(path); err == nil { + defer machoFile.Close() notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) } else if peFile, err := pe.Open(path); err == nil { + defer peFile.Close() machine, ok := peTypes[peFile.Machine] if !ok { machine = "unknown" diff --git a/vendor/github.com/hashicorp/go-plugin/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/notes_windows.go index 15680850..7afcaf3f 100644 --- a/vendor/github.com/hashicorp/go-plugin/notes_windows.go +++ b/vendor/github.com/hashicorp/go-plugin/notes_windows.go @@ -29,10 +29,13 @@ func additionalNotesAboutCommand(path string) string { notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) if elfFile, err := elf.Open(path); err == nil { + defer elfFile.Close() notes += fmt.Sprintf(" ELF architecture: %s (current 
architecture: %s)\n", elfFile.Machine, runtime.GOARCH) } else if machoFile, err := macho.Open(path); err == nil { + defer machoFile.Close() notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) } else if peFile, err := pe.Open(path); err == nil { + defer peFile.Close() machine, ok := peTypes[peFile.Machine] if !ok { machine = "unknown" diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go deleted file mode 100644 index 1675633d..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go +++ /dev/null @@ -1,15 +0,0 @@ -package mlock - -// This should be set by the OS-specific packages to tell whether LockMemory -// is supported or not. -var supported bool - -// Supported returns true if LockMemory is functional on this system. -func Supported() bool { - return supported -} - -// LockMemory prevents any memory from being swapped to disk. -func LockMemory() error { - return lockMemory() -} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go deleted file mode 100644 index 941eb2d7..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin nacl netbsd plan9 windows - -package mlock - -func init() { - supported = false -} - -func lockMemory() error { - // XXX: No good way to do this on Windows. There is the VirtualLock - // method, but it requires a specific address and offset. - return nil -} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go deleted file mode 100644 index af0a69d4..00000000 --- a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build dragonfly freebsd linux openbsd solaris - -package mlock - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -func init() { - supported = true -} - -func lockMemory() error { - // Mlockall prevents all current and future pages from being swapped out. - return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE) -} diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml deleted file mode 100644 index 76984907..00000000 --- a/vendor/github.com/hashicorp/go-uuid/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - - 1.5 - - 1.6 - - tip - -script: - - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE deleted file mode 100644 index a320b309..00000000 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ /dev/null @@ -1,365 +0,0 @@ -Copyright © 2015-2022 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. 
"Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md deleted file mode 100644 index fbde8b9a..00000000 --- a/vendor/github.com/hashicorp/go-uuid/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) - -Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go deleted file mode 100644 index 0c10c4e9..00000000 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ /dev/null @@ -1,83 +0,0 @@ -package uuid - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "io" -) - -// GenerateRandomBytes is used to generate random bytes of given size. -func GenerateRandomBytes(size int) ([]byte, error) { - return GenerateRandomBytesWithReader(size, rand.Reader) -} - -// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. 
-func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { - if reader == nil { - return nil, fmt.Errorf("provided reader is nil") - } - buf := make([]byte, size) - if _, err := io.ReadFull(reader, buf); err != nil { - return nil, fmt.Errorf("failed to read random bytes: %v", err) - } - return buf, nil -} - - -const uuidLen = 16 - -// GenerateUUID is used to generate a random UUID -func GenerateUUID() (string, error) { - return GenerateUUIDWithReader(rand.Reader) -} - -// GenerateUUIDWithReader is used to generate a random UUID with a given Reader -func GenerateUUIDWithReader(reader io.Reader) (string, error) { - if reader == nil { - return "", fmt.Errorf("provided reader is nil") - } - buf, err := GenerateRandomBytesWithReader(uuidLen, reader) - if err != nil { - return "", err - } - return FormatUUID(buf) -} - -func FormatUUID(buf []byte) (string, error) { - if buflen := len(buf); buflen != uuidLen { - return "", fmt.Errorf("wrong length byte slice (%d)", buflen) - } - - return fmt.Sprintf("%x-%x-%x-%x-%x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]), nil -} - -func ParseUUID(uuid string) ([]byte, error) { - if len(uuid) != 2 * uuidLen + 4 { - return nil, fmt.Errorf("uuid string is wrong length") - } - - if uuid[8] != '-' || - uuid[13] != '-' || - uuid[18] != '-' || - uuid[23] != '-' { - return nil, fmt.Errorf("uuid is improperly formatted") - } - - hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] - - ret, err := hex.DecodeString(hexStr) - if err != nil { - return nil, err - } - if len(ret) != uuidLen { - return nil, fmt.Errorf("decoded hex is the wrong length") - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/nomad/acl/acl.go b/vendor/github.com/hashicorp/nomad/acl/acl.go index 02a4da0c..4f37e0a0 100644 --- a/vendor/github.com/hashicorp/nomad/acl/acl.go +++ b/vendor/github.com/hashicorp/nomad/acl/acl.go @@ -386,6 +386,10 @@ func (a *ACL) AllowVariableSearch(ns string) bool { if a.management { return true } + if ns == "*" { + return a.variables.Len() > 0 || a.wildcardVariables.Len() > 0 + } + iter := a.variables.Root().Iterator() iter.SeekPrefix([]byte(ns)) _, _, ok := iter.Next() diff --git a/vendor/github.com/hashicorp/nomad/acl/policy.go b/vendor/github.com/hashicorp/nomad/acl/policy.go index 39d886bb..89153285 100644 --- a/vendor/github.com/hashicorp/nomad/acl/policy.go +++ b/vendor/github.com/hashicorp/nomad/acl/policy.go @@ -1,10 +1,12 @@ package acl import ( + "errors" "fmt" "regexp" "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" ) const ( @@ -400,5 +402,65 @@ func hclDecode(p *Policy, rules string) (err error) { } }() - return hcl.Decode(p, rules) + if err = hcl.Decode(p, rules); err != nil { + return err + } + + // Manually parse the policy to fix blocks without labels. + // + // Due to a bug in the way HCL decodes files, a block without a label may + // return an incorrect key value and make it impossible to determine if the + // key was set by the user or incorrectly set by the decoder. + // + // By manually parsing the file we are able to determine if the label is + // missing in the file and set them to an empty string so the policy + // validation can return the appropriate errors. 
+ root, err := hcl.Parse(rules) + if err != nil { + return fmt.Errorf("failed to parse policy: %w", err) + } + + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return errors.New("error parsing: root should be an object") + } + + nsList := list.Filter("namespace") + for i, nsObj := range nsList.Items { + // Fix missing namespace key. + if len(nsObj.Keys) == 0 { + p.Namespaces[i].Name = "" + } + + // Fix missing variable paths. + nsOT, ok := nsObj.Val.(*ast.ObjectType) + if !ok { + continue + } + varsList := nsOT.List.Filter("variables") + if varsList == nil || len(varsList.Items) == 0 { + continue + } + + varsObj, ok := varsList.Items[0].Val.(*ast.ObjectType) + if !ok { + continue + } + paths := varsObj.List.Filter("path") + for j, path := range paths.Items { + if len(path.Keys) == 0 { + p.Namespaces[i].Variables.Paths[j].PathSpec = "" + } + } + } + + hvList := list.Filter("host_volume") + for i, hvObj := range hvList.Items { + // Fix missing host volume key. + if len(hvObj.Keys) == 0 { + p.HostVolumes[i].Name = "" + } + } + + return nil } diff --git a/vendor/github.com/hashicorp/nomad/api/LICENSE b/vendor/github.com/hashicorp/nomad/api/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/hashicorp/nomad/api/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. 
"Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. 
Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/nomad/api/acl.go b/vendor/github.com/hashicorp/nomad/api/acl.go deleted file mode 100644 index 67a9a04a..00000000 --- a/vendor/github.com/hashicorp/nomad/api/acl.go +++ /dev/null @@ -1,477 +0,0 @@ -package api - -import ( - "errors" - "time" -) - -// ACLPolicies is used to query the ACL Policy endpoints. -type ACLPolicies struct { - client *Client -} - -// ACLPolicies returns a new handle on the ACL policies. -func (c *Client) ACLPolicies() *ACLPolicies { - return &ACLPolicies{client: c} -} - -// List is used to dump all of the policies. 
-func (a *ACLPolicies) List(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) { - var resp []*ACLPolicyListStub - qm, err := a.client.query("/v1/acl/policies", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// Upsert is used to create or update a policy -func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, error) { - if policy == nil || policy.Name == "" { - return nil, errors.New("missing policy name") - } - wm, err := a.client.write("/v1/acl/policy/"+policy.Name, policy, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Delete is used to delete a policy -func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { - if policyName == "" { - return nil, errors.New("missing policy name") - } - wm, err := a.client.delete("/v1/acl/policy/"+policyName, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Info is used to query a specific policy -func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { - if policyName == "" { - return nil, nil, errors.New("missing policy name") - } - var resp ACLPolicy - wm, err := a.client.query("/v1/acl/policy/"+policyName, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// ACLTokens is used to query the ACL token endpoints. -type ACLTokens struct { - client *Client -} - -// ACLTokens returns a new handle on the ACL tokens. -func (c *Client) ACLTokens() *ACLTokens { - return &ACLTokens{client: c} -} - -// DEPRECATED: will be removed in Nomad 1.5.0 -// Bootstrap is used to get the initial bootstrap token -func (a *ACLTokens) Bootstrap(q *WriteOptions) (*ACLToken, *WriteMeta, error) { - var resp ACLToken - wm, err := a.client.write("/v1/acl/bootstrap", nil, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// BootstrapOpts is used to get the initial bootstrap token or pass in the one that was provided in the API -func (a *ACLTokens) BootstrapOpts(btoken string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if q == nil { - q = &WriteOptions{} - } - req := &BootstrapRequest{ - BootstrapSecret: btoken, - } - - var resp ACLToken - wm, err := a.client.write("/v1/acl/bootstrap", req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// List is used to dump all of the tokens. 
-func (a *ACLTokens) List(q *QueryOptions) ([]*ACLTokenListStub, *QueryMeta, error) { - var resp []*ACLTokenListStub - qm, err := a.client.query("/v1/acl/tokens", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// Create is used to create a token -func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if token.AccessorID != "" { - return nil, nil, errors.New("cannot specify Accessor ID") - } - var resp ACLToken - wm, err := a.client.write("/v1/acl/token", token, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Update is used to update an existing token -func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if token.AccessorID == "" { - return nil, nil, errors.New("missing accessor ID") - } - var resp ACLToken - wm, err := a.client.write("/v1/acl/token/"+token.AccessorID, - token, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Delete is used to delete a token -func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, error) { - if accessorID == "" { - return nil, errors.New("missing accessor ID") - } - wm, err := a.client.delete("/v1/acl/token/"+accessorID, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Info is used to query a token -func (a *ACLTokens) Info(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) { - if accessorID == "" { - return nil, nil, errors.New("missing accessor ID") - } - var resp ACLToken - wm, err := a.client.query("/v1/acl/token/"+accessorID, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Self is used to query our own token -func (a *ACLTokens) Self(q *QueryOptions) (*ACLToken, *QueryMeta, error) { - var resp ACLToken - wm, err := a.client.query("/v1/acl/token/self", &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// UpsertOneTimeToken is used to create a one-time token -func (a *ACLTokens) UpsertOneTimeToken(q *WriteOptions) (*OneTimeToken, *WriteMeta, error) { - var resp *OneTimeTokenUpsertResponse - wm, err := a.client.write("/v1/acl/token/onetime", nil, &resp, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, nil, errors.New("no one-time token returned") - } - return resp.OneTimeToken, wm, nil -} - -// ExchangeOneTimeToken is used to create a one-time token -func (a *ACLTokens) ExchangeOneTimeToken(secret string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { - if secret == "" { - return nil, nil, errors.New("missing secret ID") - } - req := &OneTimeTokenExchangeRequest{OneTimeSecretID: secret} - var resp *OneTimeTokenExchangeResponse - wm, err := a.client.write("/v1/acl/token/onetime/exchange", req, &resp, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, nil, errors.New("no ACL token returned") - } - return resp.Token, wm, nil -} - -var ( - // errMissingACLRoleID is the generic errors to use when a call is missing - // the required ACL Role ID parameter. - errMissingACLRoleID = errors.New("missing ACL role ID") -) - -// ACLRoles is used to query the ACL Role endpoints. -type ACLRoles struct { - client *Client -} - -// ACLRoles returns a new handle on the ACL roles API client. -func (c *Client) ACLRoles() *ACLRoles { - return &ACLRoles{client: c} -} - -// List is used to detail all the ACL roles currently stored within state. 
-func (a *ACLRoles) List(q *QueryOptions) ([]*ACLRoleListStub, *QueryMeta, error) { - var resp []*ACLRoleListStub - qm, err := a.client.query("/v1/acl/roles", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// Create is used to create an ACL role. -func (a *ACLRoles) Create(role *ACLRole, w *WriteOptions) (*ACLRole, *WriteMeta, error) { - if role.ID != "" { - return nil, nil, errors.New("cannot specify ACL role ID") - } - var resp ACLRole - wm, err := a.client.write("/v1/acl/role", role, &resp, w) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Update is used to update an existing ACL role. -func (a *ACLRoles) Update(role *ACLRole, w *WriteOptions) (*ACLRole, *WriteMeta, error) { - if role.ID == "" { - return nil, nil, errMissingACLRoleID - } - var resp ACLRole - wm, err := a.client.write("/v1/acl/role/"+role.ID, role, &resp, w) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Delete is used to delete an ACL role. -func (a *ACLRoles) Delete(roleID string, w *WriteOptions) (*WriteMeta, error) { - if roleID == "" { - return nil, errMissingACLRoleID - } - wm, err := a.client.delete("/v1/acl/role/"+roleID, nil, nil, w) - if err != nil { - return nil, err - } - return wm, nil -} - -// Get is used to look up an ACL role. -func (a *ACLRoles) Get(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { - if roleID == "" { - return nil, nil, errMissingACLRoleID - } - var resp ACLRole - qm, err := a.client.query("/v1/acl/role/"+roleID, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// GetByName is used to look up an ACL role using its name. -func (a *ACLRoles) GetByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { - if roleName == "" { - return nil, nil, errors.New("missing ACL role name") - } - var resp ACLRole - qm, err := a.client.query("/v1/acl/role/name/"+roleName, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// ACLPolicyListStub is used to for listing ACL policies -type ACLPolicyListStub struct { - Name string - Description string - CreateIndex uint64 - ModifyIndex uint64 -} - -// ACLPolicy is used to represent an ACL policy -type ACLPolicy struct { - Name string - Description string - Rules string - JobACL *JobACL - - CreateIndex uint64 - ModifyIndex uint64 -} - -// JobACL represents an ACL policy's attachment to a job, group, or task. -type JobACL struct { - Namespace string - JobID string - Group string - Task string -} - -// ACLToken represents a client token which is used to Authenticate -type ACLToken struct { - AccessorID string - SecretID string - Name string - Type string - Policies []string - - // Roles represents the ACL roles that this token is tied to. The token - // will inherit the permissions of all policies detailed within the role. - Roles []*ACLTokenRoleLink - - Global bool - CreateTime time.Time - - // ExpirationTime represents the point after which a token should be - // considered revoked and is eligible for destruction. The zero value of - // time.Time does not respect json omitempty directives, so we must use a - // pointer. - ExpirationTime *time.Time `json:",omitempty"` - - // ExpirationTTL is a convenience field for helping set ExpirationTime to a - // value of CreateTime+ExpirationTTL. This can only be set during token - // creation. This is a string version of a time.Duration like "2m". 
- ExpirationTTL time.Duration `json:",omitempty"` - - CreateIndex uint64 - ModifyIndex uint64 -} - -// ACLTokenRoleLink is used to link an ACL token to an ACL role. The ACL token -// can therefore inherit all the ACL policy permissions that the ACL role -// contains. -type ACLTokenRoleLink struct { - - // ID is the ACLRole.ID UUID. This field is immutable and represents the - // absolute truth for the link. - ID string - - // Name is the human friendly identifier for the ACL role and is a - // convenience field for operators. - Name string -} - -type ACLTokenListStub struct { - AccessorID string - Name string - Type string - Policies []string - Roles []*ACLTokenRoleLink - Global bool - CreateTime time.Time - - // ExpirationTime represents the point after which a token should be - // considered revoked and is eligible for destruction. A nil value - // indicates no expiration has been set on the token. - ExpirationTime *time.Time `json:",omitempty"` - - CreateIndex uint64 - ModifyIndex uint64 -} - -type OneTimeToken struct { - OneTimeSecretID string - AccessorID string - ExpiresAt time.Time - CreateIndex uint64 - ModifyIndex uint64 -} - -type OneTimeTokenUpsertResponse struct { - OneTimeToken *OneTimeToken -} - -type OneTimeTokenExchangeRequest struct { - OneTimeSecretID string -} - -type OneTimeTokenExchangeResponse struct { - Token *ACLToken -} - -// BootstrapRequest is used for when operators provide an ACL Bootstrap Token -type BootstrapRequest struct { - BootstrapSecret string -} - -// ACLRole is an abstraction for the ACL system which allows the grouping of -// ACL policies into a single object. ACL tokens can be created and linked to -// a role; the token then inherits all the permissions granted by the policies. -type ACLRole struct { - - // ID is an internally generated UUID for this role and is controlled by - // Nomad. It can be used after role creation to update the existing role. - ID string - - // Name is unique across the entire set of federated clusters and is - // supplied by the operator on role creation. The name can be modified by - // updating the role and including the Nomad generated ID. This update will - // not affect tokens created and linked to this role. This is a required - // field. - Name string - - // Description is a human-readable, operator set description that can - // provide additional context about the role. This is an optional field. - Description string - - // Policies is an array of ACL policy links. Although currently policies - // can only be linked using their name, in the future we will want to add - // IDs also and thus allow operators to specify either a name, an ID, or - // both. At least one entry is required. - Policies []*ACLRolePolicyLink - - CreateIndex uint64 - ModifyIndex uint64 -} - -// ACLRolePolicyLink is used to link a policy to an ACL role. We use a struct -// rather than a list of strings as in the future we will want to add IDs to -// policies and then link via these. -type ACLRolePolicyLink struct { - - // Name is the ACLPolicy.Name value which will be linked to the ACL role. - Name string -} - -// ACLRoleListStub is the stub object returned when performing a listing of ACL -// roles. While it might not currently be different to the full response -// object, it allows us to future-proof the RPC in the event the ACLRole object -// grows over time. -type ACLRoleListStub struct { - - // ID is an internally generated UUID for this role and is controlled by - // Nomad. 
- ID string - - // Name is unique across the entire set of federated clusters and is - // supplied by the operator on role creation. The name can be modified by - // updating the role and including the Nomad generated ID. This update will - // not affect tokens created and linked to this role. This is a required - // field. - Name string - - // Description is a human-readable, operator set description that can - // provide additional context about the role. This is an operational field. - Description string - - // Policies is an array of ACL policy links. Although currently policies - // can only be linked using their name, in the future we will want to add - // IDs also and thus allow operators to specify either a name, an ID, or - // both. - Policies []*ACLRolePolicyLink - - CreateIndex uint64 - ModifyIndex uint64 -} diff --git a/vendor/github.com/hashicorp/nomad/api/agent.go b/vendor/github.com/hashicorp/nomad/api/agent.go deleted file mode 100644 index 2d19b953..00000000 --- a/vendor/github.com/hashicorp/nomad/api/agent.go +++ /dev/null @@ -1,571 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - "strconv" -) - -// Agent encapsulates an API client which talks to Nomad's -// agent endpoints for a specific node. -type Agent struct { - client *Client - - // Cache static agent info - nodeName string - datacenter string - region string -} - -// KeyringResponse is a unified key response and can be used for install, -// remove, use, as well as listing key queries. -type KeyringResponse struct { - Messages map[string]string - Keys map[string]int - NumNodes int -} - -// KeyringRequest is request objects for serf key operations. -type KeyringRequest struct { - Key string -} - -// Agent returns a new agent which can be used to query -// the agent-specific endpoints. -func (c *Client) Agent() *Agent { - return &Agent{client: c} -} - -// Self is used to query the /v1/agent/self endpoint and -// returns information specific to the running agent. -func (a *Agent) Self() (*AgentSelf, error) { - var out *AgentSelf - - // Query the self endpoint on the agent - _, err := a.client.query("/v1/agent/self", &out, nil) - if err != nil { - return nil, fmt.Errorf("failed querying self endpoint: %s", err) - } - - // Populate the cache for faster queries - a.populateCache(out) - - return out, nil -} - -// populateCache is used to insert various pieces of static -// data into the agent handle. This is used during subsequent -// lookups for the same data later on to save the round trip. -func (a *Agent) populateCache(self *AgentSelf) { - if a.nodeName == "" { - a.nodeName = self.Member.Name - } - if a.datacenter == "" { - if val, ok := self.Config["Datacenter"]; ok { - a.datacenter, _ = val.(string) - } - } - if a.region == "" { - if val, ok := self.Config["Region"]; ok { - a.region, _ = val.(string) - } - } -} - -// NodeName is used to query the Nomad agent for its node name. -func (a *Agent) NodeName() (string, error) { - // Return from cache if we have it - if a.nodeName != "" { - return a.nodeName, nil - } - - // Query the node name - _, err := a.Self() - return a.nodeName, err -} - -// Datacenter is used to return the name of the datacenter which -// the agent is a member of. -func (a *Agent) Datacenter() (string, error) { - // Return from cache if we have it - if a.datacenter != "" { - return a.datacenter, nil - } - - // Query the agent for the DC - _, err := a.Self() - return a.datacenter, err -} - -// Region is used to look up the region the agent is in. 
-func (a *Agent) Region() (string, error) { - // Return from cache if we have it - if a.region != "" { - return a.region, nil - } - - // Query the agent for the region - _, err := a.Self() - return a.region, err -} - -// Join is used to instruct a server node to join another server -// via the gossip protocol. Multiple addresses may be specified. -// We attempt to join all of the hosts in the list. Returns the -// number of nodes successfully joined and any error. If one or -// more nodes have a successful result, no error is returned. -func (a *Agent) Join(addrs ...string) (int, error) { - // Accumulate the addresses - v := url.Values{} - for _, addr := range addrs { - v.Add("address", addr) - } - - // Send the join request - var resp joinResponse - _, err := a.client.write("/v1/agent/join?"+v.Encode(), nil, &resp, nil) - if err != nil { - return 0, fmt.Errorf("failed joining: %s", err) - } - if resp.Error != "" { - return 0, fmt.Errorf("failed joining: %s", resp.Error) - } - return resp.NumJoined, nil -} - -// Members is used to query all of the known server members -func (a *Agent) Members() (*ServerMembers, error) { - var resp *ServerMembers - - // Query the known members - _, err := a.client.query("/v1/agent/members", &resp, nil) - if err != nil { - return nil, err - } - return resp, nil -} - -// Members is used to query all of the known server members -// with the ability to set QueryOptions -func (a *Agent) MembersOpts(opts *QueryOptions) (*ServerMembers, error) { - var resp *ServerMembers - _, err := a.client.query("/v1/agent/members", &resp, opts) - if err != nil { - return nil, err - } - return resp, nil -} - -// ForceLeave is used to eject an existing node from the cluster. -func (a *Agent) ForceLeave(node string) error { - _, err := a.client.write("/v1/agent/force-leave?node="+node, nil, nil, nil) - return err -} - -// Servers is used to query the list of servers on a client node. -func (a *Agent) Servers() ([]string, error) { - var resp []string - _, err := a.client.query("/v1/agent/servers", &resp, nil) - if err != nil { - return nil, err - } - return resp, nil -} - -// SetServers is used to update the list of servers on a client node. 
-func (a *Agent) SetServers(addrs []string) error { - // Accumulate the addresses - v := url.Values{} - for _, addr := range addrs { - v.Add("address", addr) - } - - _, err := a.client.write("/v1/agent/servers?"+v.Encode(), nil, nil, nil) - return err -} - -// ListKeys returns the list of installed keys -func (a *Agent) ListKeys() (*KeyringResponse, error) { - var resp KeyringResponse - _, err := a.client.query("/v1/agent/keyring/list", &resp, nil) - if err != nil { - return nil, err - } - return &resp, nil -} - -// InstallKey installs a key in the keyrings of all the serf members -func (a *Agent) InstallKey(key string) (*KeyringResponse, error) { - args := KeyringRequest{ - Key: key, - } - var resp KeyringResponse - _, err := a.client.write("/v1/agent/keyring/install", &args, &resp, nil) - return &resp, err -} - -// UseKey uses a key from the keyring of serf members -func (a *Agent) UseKey(key string) (*KeyringResponse, error) { - args := KeyringRequest{ - Key: key, - } - var resp KeyringResponse - _, err := a.client.write("/v1/agent/keyring/use", &args, &resp, nil) - return &resp, err -} - -// RemoveKey removes a particular key from keyrings of serf members -func (a *Agent) RemoveKey(key string) (*KeyringResponse, error) { - args := KeyringRequest{ - Key: key, - } - var resp KeyringResponse - _, err := a.client.write("/v1/agent/keyring/remove", &args, &resp, nil) - return &resp, err -} - -// Health queries the agent's health -func (a *Agent) Health() (*AgentHealthResponse, error) { - req, err := a.client.newRequest("GET", "/v1/agent/health") - if err != nil { - return nil, err - } - - var health AgentHealthResponse - _, resp, err := a.client.doRequest(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - // Always try to decode the response as JSON - err = json.NewDecoder(resp.Body).Decode(&health) - if err == nil { - return &health, nil - } - - // Return custom error when response is not expected JSON format - return nil, fmt.Errorf("unable to unmarshal response with status %d: %v", resp.StatusCode, err) -} - -// Host returns debugging context about the agent's host operating system -func (a *Agent) Host(serverID, nodeID string, q *QueryOptions) (*HostDataResponse, error) { - if q == nil { - q = &QueryOptions{} - } - if q.Params == nil { - q.Params = make(map[string]string) - } - - if serverID != "" { - q.Params["server_id"] = serverID - } - - if nodeID != "" { - q.Params["node_id"] = nodeID - } - - var resp HostDataResponse - _, err := a.client.query("/v1/agent/host", &resp, q) - if err != nil { - return nil, err - } - - return &resp, nil -} - -// Monitor returns a channel which will receive streaming logs from the agent -// Providing a non-nil stopCh can be used to close the connection and stop log streaming -func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { - errCh := make(chan error, 1) - r, err := a.client.newRequest("GET", "/v1/agent/monitor") - if err != nil { - errCh <- err - return nil, errCh - } - - r.setQueryOptions(q) - _, resp, err := requireOK(a.client.doRequest(r)) - if err != nil { - errCh <- err - return nil, errCh - } - - frames := make(chan *StreamFrame, 10) - go func() { - defer resp.Body.Close() - - dec := json.NewDecoder(resp.Body) - - for { - select { - case <-stopCh: - close(frames) - return - default: - } - - // Decode the next frame - var frame StreamFrame - if err := dec.Decode(&frame); err != nil { - close(frames) - errCh <- err - return - } - - // Discard heartbeat frame - if 
frame.IsHeartbeat() { - continue - } - - frames <- &frame - } - }() - - return frames, errCh -} - -// PprofOptions contain a set of parameters for profiling a node or server. -type PprofOptions struct { - // ServerID is the server ID, name, or special value "leader" to - // specify the server that a given profile should be run on. - ServerID string - - // NodeID is the node ID that a given profile should be run on. - NodeID string - - // Seconds specifies the amount of time a profile should be run for. - // Seconds only applies for certain runtime profiles like CPU and Trace. - Seconds int - - // GC determines if a runtime.GC() should be called before a heap - // profile. - GC int - - // Debug specifies if the output of a lookup profile should be returned - // in human readable format instead of binary. - Debug int -} - -// CPUProfile returns a runtime/pprof cpu profile for a given server or node. -// The profile will run for the amount of seconds passed in or default to 1. -// If no serverID or nodeID are provided the current Agents server will be -// used. -// -// The call blocks until the profile finishes, and returns the raw bytes of the -// profile. -func (a *Agent) CPUProfile(opts PprofOptions, q *QueryOptions) ([]byte, error) { - return a.pprofRequest("profile", opts, q) -} - -// Trace returns a runtime/pprof trace for a given server or node. -// The trace will run for the amount of seconds passed in or default to 1. -// If no serverID or nodeID are provided the current Agents server will be -// used. -// -// The call blocks until the profile finishes, and returns the raw bytes of the -// profile. -func (a *Agent) Trace(opts PprofOptions, q *QueryOptions) ([]byte, error) { - return a.pprofRequest("trace", opts, q) -} - -// Lookup returns a runtime/pprof profile using pprof.Lookup to determine -// which profile to run. Accepts a client or server ID but not both simultaneously. -// -// The call blocks until the profile finishes, and returns the raw bytes of the -// profile unless debug is set. -func (a *Agent) Lookup(profile string, opts PprofOptions, q *QueryOptions) ([]byte, error) { - return a.pprofRequest(profile, opts, q) -} - -func (a *Agent) pprofRequest(req string, opts PprofOptions, q *QueryOptions) ([]byte, error) { - if q == nil { - q = &QueryOptions{} - } - if q.Params == nil { - q.Params = make(map[string]string) - } - - q.Params["seconds"] = strconv.Itoa(opts.Seconds) - q.Params["debug"] = strconv.Itoa(opts.Debug) - q.Params["gc"] = strconv.Itoa(opts.GC) - q.Params["node_id"] = opts.NodeID - q.Params["server_id"] = opts.ServerID - - body, err := a.client.rawQuery(fmt.Sprintf("/v1/agent/pprof/%s", req), q) - if err != nil { - return nil, err - } - - resp, err := ioutil.ReadAll(body) - if err != nil { - return nil, err - } - return resp, nil -} - -// joinResponse is used to decode the response we get while -// sending a member join request. 
-type joinResponse struct { - NumJoined int `json:"num_joined"` - Error string `json:"error"` -} - -type ServerMembers struct { - ServerName string - ServerRegion string - ServerDC string - Members []*AgentMember -} - -type AgentSelf struct { - Config map[string]interface{} `json:"config"` - Member AgentMember `json:"member"` - Stats map[string]map[string]string `json:"stats"` -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status string - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AgentMembersNameSort implements sort.Interface for []*AgentMembersNameSort -// based on the Name, DC and Region -type AgentMembersNameSort []*AgentMember - -func (a AgentMembersNameSort) Len() int { return len(a) } -func (a AgentMembersNameSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a AgentMembersNameSort) Less(i, j int) bool { - if a[i].Tags["region"] != a[j].Tags["region"] { - return a[i].Tags["region"] < a[j].Tags["region"] - } - - if a[i].Tags["dc"] != a[j].Tags["dc"] { - return a[i].Tags["dc"] < a[j].Tags["dc"] - } - - return a[i].Name < a[j].Name - -} - -// AgentHealthResponse is the response from the Health endpoint describing an -// agent's health. -type AgentHealthResponse struct { - Client *AgentHealth `json:"client,omitempty"` - Server *AgentHealth `json:"server,omitempty"` -} - -// AgentHealth describes the Client or Server's health in a Health request. -type AgentHealth struct { - // Ok is false if the agent is unhealthy - Ok bool `json:"ok"` - - // Message describes why the agent is unhealthy - Message string `json:"message"` -} - -type HostData struct { - OS string - Network []map[string]string - ResolvConf string - Hosts string - Environment map[string]string - Disk map[string]DiskUsage -} - -type DiskUsage struct { - DiskMB int64 - UsedMB int64 -} - -type HostDataResponse struct { - AgentID string - HostData *HostData `json:",omitempty"` -} - -// GetSchedulerWorkerConfig returns the targeted agent's worker pool configuration -func (a *Agent) GetSchedulerWorkerConfig(q *QueryOptions) (*SchedulerWorkerPoolArgs, error) { - var resp AgentSchedulerWorkerConfigResponse - _, err := a.client.query("/v1/agent/schedulers/config", &resp, q) - if err != nil { - return nil, err - } - - return &SchedulerWorkerPoolArgs{NumSchedulers: resp.NumSchedulers, EnabledSchedulers: resp.EnabledSchedulers}, nil -} - -// SetSchedulerWorkerConfig attempts to update the targeted agent's worker pool configuration -func (a *Agent) SetSchedulerWorkerConfig(args SchedulerWorkerPoolArgs, q *WriteOptions) (*SchedulerWorkerPoolArgs, error) { - req := AgentSchedulerWorkerConfigRequest(args) - var resp AgentSchedulerWorkerConfigResponse - - _, err := a.client.write("/v1/agent/schedulers/config", &req, &resp, q) - if err != nil { - return nil, err - } - - return &SchedulerWorkerPoolArgs{NumSchedulers: resp.NumSchedulers, EnabledSchedulers: resp.EnabledSchedulers}, nil -} - -type SchedulerWorkerPoolArgs struct { - NumSchedulers int - EnabledSchedulers []string -} - -// AgentSchedulerWorkerConfigRequest is used to provide new scheduler worker configuration -// to a specific Nomad server. EnabledSchedulers must contain at least the `_core` scheduler -// to be valid. 
-type AgentSchedulerWorkerConfigRequest struct { - NumSchedulers int `json:"num_schedulers"` - EnabledSchedulers []string `json:"enabled_schedulers"` -} - -// AgentSchedulerWorkerConfigResponse contains the Nomad server's current running configuration -// as well as the server's id as a convenience. This can be used to provide starting values for -// creating an AgentSchedulerWorkerConfigRequest to make changes to the running configuration. -type AgentSchedulerWorkerConfigResponse struct { - ServerID string `json:"server_id"` - NumSchedulers int `json:"num_schedulers"` - EnabledSchedulers []string `json:"enabled_schedulers"` -} - -// GetSchedulerWorkersInfo returns the current status of all of the scheduler workers on -// a Nomad server. -func (a *Agent) GetSchedulerWorkersInfo(q *QueryOptions) (*AgentSchedulerWorkersInfo, error) { - var out *AgentSchedulerWorkersInfo - - _, err := a.client.query("/v1/agent/schedulers", &out, q) - if err != nil { - return nil, err - } - - return out, nil -} - -// AgentSchedulerWorkersInfo is the response from the scheduler information endpoint containing -// a detailed status of each scheduler worker running on the server. -type AgentSchedulerWorkersInfo struct { - ServerID string `json:"server_id"` - Schedulers []AgentSchedulerWorkerInfo `json:"schedulers"` -} - -// AgentSchedulerWorkerInfo holds the detailed status information for a single scheduler worker. -type AgentSchedulerWorkerInfo struct { - ID string `json:"id"` - EnabledSchedulers []string `json:"enabled_schedulers"` - Started string `json:"started"` - Status string `json:"status"` - WorkloadStatus string `json:"workload_status"` -} diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go deleted file mode 100644 index dc2ebb27..00000000 --- a/vendor/github.com/hashicorp/nomad/api/allocations.go +++ /dev/null @@ -1,563 +0,0 @@ -package api - -import ( - "context" - "errors" - "io" - "sort" - "strings" - "time" -) - -var ( - // NodeDownErr marks an operation as not able to complete since the node is - // down. - NodeDownErr = errors.New("node down") -) - -const ( - AllocDesiredStatusRun = "run" // Allocation should run - AllocDesiredStatusStop = "stop" // Allocation should stop - AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted -) - -const ( - AllocClientStatusPending = "pending" - AllocClientStatusRunning = "running" - AllocClientStatusComplete = "complete" - AllocClientStatusFailed = "failed" - AllocClientStatusLost = "lost" -) - -const ( - AllocRestartReasonWithinPolicy = "Restart within policy" -) - -// Allocations is used to query the alloc-related endpoints. -type Allocations struct { - client *Client -} - -// Allocations returns a handle on the allocs endpoints. -func (c *Client) Allocations() *Allocations { - return &Allocations{client: c} -} - -// List returns a list of all of the allocations. -func (a *Allocations) List(q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { - var resp []*AllocationListStub - qm, err := a.client.query("/v1/allocations", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(AllocIndexSort(resp)) - return resp, qm, nil -} - -func (a *Allocations) PrefixList(prefix string) ([]*AllocationListStub, *QueryMeta, error) { - return a.List(&QueryOptions{Prefix: prefix}) -} - -// Info is used to retrieve a single allocation. 
-func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *QueryMeta, error) { - var resp Allocation - qm, err := a.client.query("/v1/allocation/"+allocID, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Exec is used to execute a command inside a running task. The command is to run inside -// the task environment. -// -// The parameters are: -// - ctx: context to set deadlines or timeout -// - allocation: the allocation to execute command inside -// - task: the task's name to execute command in -// - tty: indicates whether to start a pseudo-tty for the command -// - stdin, stdout, stderr: the std io to pass to command. -// If tty is true, then streams need to point to a tty that's alive for the whole process -// - terminalSizeCh: A channel to send new tty terminal sizes -// -// The call blocks until command terminates (or an error occurs), and returns the exit code. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) Exec(ctx context.Context, - alloc *Allocation, task string, tty bool, command []string, - stdin io.Reader, stdout, stderr io.Writer, - terminalSizeCh <-chan TerminalSize, q *QueryOptions) (exitCode int, err error) { - - s := &execSession{ - client: a.client, - alloc: alloc, - task: task, - tty: tty, - command: command, - - stdin: stdin, - stdout: stdout, - stderr: stderr, - - terminalSizeCh: terminalSizeCh, - q: q, - } - - return s.run(ctx) -} - -// Stats gets allocation resource usage statistics about an allocation. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) { - var resp AllocResourceUsage - _, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/stats", &resp, q) - return &resp, err -} - -// Checks gets status information for nomad service checks that exist in the allocation. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) Checks(allocID string, q *QueryOptions) (AllocCheckStatuses, error) { - var resp AllocCheckStatuses - _, err := a.client.query("/v1/client/allocation/"+allocID+"/checks", &resp, q) - return resp, err -} - -// GC forces a garbage collection of client state for an allocation. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error { - var resp struct{} - _, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil) - return err -} - -// Restart restarts the tasks that are currently running or a specific task if -// taskName is provided. An error is returned if the task to be restarted is -// not running. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. 
-func (a *Allocations) Restart(alloc *Allocation, taskName string, q *QueryOptions) error { - req := AllocationRestartRequest{ - TaskName: taskName, - } - - var resp struct{} - _, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/restart", &req, &resp, q) - return err -} - -// RestartAllTasks restarts all tasks in the allocation, regardless of -// lifecycle type or state. Tasks will restart following their lifecycle order. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) RestartAllTasks(alloc *Allocation, q *QueryOptions) error { - req := AllocationRestartRequest{ - AllTasks: true, - } - - var resp struct{} - _, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/restart", &req, &resp, q) - return err -} - -// Stop stops an allocation. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) Stop(alloc *Allocation, q *QueryOptions) (*AllocStopResponse, error) { - var resp AllocStopResponse - _, err := a.client.putQuery("/v1/allocation/"+alloc.ID+"/stop", nil, &resp, q) - return &resp, err -} - -// AllocStopResponse is the response to an `AllocStopRequest` -type AllocStopResponse struct { - // EvalID is the id of the follow up evalution for the rescheduled alloc. - EvalID string - - WriteMeta -} - -// Signal sends a signal to the allocation. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal string) error { - req := AllocSignalRequest{ - Signal: signal, - Task: task, - } - - var resp GenericResponse - _, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/signal", &req, &resp, q) - return err -} - -// Services is used to return a list of service registrations associated to the -// specified allocID. -func (a *Allocations) Services(allocID string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) { - var resp []*ServiceRegistration - qm, err := a.client.query("/v1/allocation/"+allocID+"/services", &resp, q) - return resp, qm, err -} - -// Allocation is used for serialization of allocations. -type Allocation struct { - ID string - Namespace string - EvalID string - Name string - NodeID string - NodeName string - JobID string - Job *Job - TaskGroup string - Resources *Resources - TaskResources map[string]*Resources - AllocatedResources *AllocatedResources - Services map[string]string - Metrics *AllocationMetric - DesiredStatus string - DesiredDescription string - DesiredTransition DesiredTransition - ClientStatus string - ClientDescription string - TaskStates map[string]*TaskState - DeploymentID string - DeploymentStatus *AllocDeploymentStatus - FollowupEvalID string - PreviousAllocation string - NextAllocation string - RescheduleTracker *RescheduleTracker - PreemptedAllocations []string - PreemptedByAllocation string - CreateIndex uint64 - ModifyIndex uint64 - AllocModifyIndex uint64 - CreateTime int64 - ModifyTime int64 -} - -// AllocationMetric is used to deserialize allocation metrics. 
-type AllocationMetric struct { - NodesEvaluated int - NodesFiltered int - NodesAvailable map[string]int - ClassFiltered map[string]int - ConstraintFiltered map[string]int - NodesExhausted int - ClassExhausted map[string]int - DimensionExhausted map[string]int - QuotaExhausted []string - ResourcesExhausted map[string]*Resources - // Deprecated, replaced with ScoreMetaData - Scores map[string]float64 - AllocationTime time.Duration - CoalescedFailures int - ScoreMetaData []*NodeScoreMeta -} - -// NodeScoreMeta is used to serialize node scoring metadata -// displayed in the CLI during verbose mode -type NodeScoreMeta struct { - NodeID string - Scores map[string]float64 - NormScore float64 -} - -// Stub returns a list stub for the allocation -func (a *Allocation) Stub() *AllocationListStub { - return &AllocationListStub{ - ID: a.ID, - EvalID: a.EvalID, - Name: a.Name, - Namespace: a.Namespace, - NodeID: a.NodeID, - NodeName: a.NodeName, - JobID: a.JobID, - JobType: *a.Job.Type, - JobVersion: *a.Job.Version, - TaskGroup: a.TaskGroup, - DesiredStatus: a.DesiredStatus, - DesiredDescription: a.DesiredDescription, - ClientStatus: a.ClientStatus, - ClientDescription: a.ClientDescription, - TaskStates: a.TaskStates, - DeploymentStatus: a.DeploymentStatus, - FollowupEvalID: a.FollowupEvalID, - RescheduleTracker: a.RescheduleTracker, - PreemptedAllocations: a.PreemptedAllocations, - PreemptedByAllocation: a.PreemptedByAllocation, - CreateIndex: a.CreateIndex, - ModifyIndex: a.ModifyIndex, - CreateTime: a.CreateTime, - ModifyTime: a.ModifyTime, - } -} - -// ServerTerminalStatus returns true if the desired state of the allocation is -// terminal. -func (a *Allocation) ServerTerminalStatus() bool { - switch a.DesiredStatus { - case AllocDesiredStatusStop, AllocDesiredStatusEvict: - return true - default: - return false - } -} - -// ClientTerminalStatus returns true if the client status is terminal and will -// therefore no longer transition. -func (a *Allocation) ClientTerminalStatus() bool { - switch a.ClientStatus { - case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost: - return true - default: - return false - } -} - -// AllocationListStub is used to return a subset of an allocation -// during list operations. -type AllocationListStub struct { - ID string - EvalID string - Name string - Namespace string - NodeID string - NodeName string - JobID string - JobType string - JobVersion uint64 - TaskGroup string - AllocatedResources *AllocatedResources `json:",omitempty"` - DesiredStatus string - DesiredDescription string - ClientStatus string - ClientDescription string - TaskStates map[string]*TaskState - DeploymentStatus *AllocDeploymentStatus - FollowupEvalID string - RescheduleTracker *RescheduleTracker - PreemptedAllocations []string - PreemptedByAllocation string - CreateIndex uint64 - ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 -} - -// AllocDeploymentStatus captures the status of the allocation as part of the -// deployment. This can include things like if the allocation has been marked as -// healthy. 
-type AllocDeploymentStatus struct { - Healthy *bool - Timestamp time.Time - Canary bool - ModifyIndex uint64 -} - -type AllocatedResources struct { - Tasks map[string]*AllocatedTaskResources - Shared AllocatedSharedResources -} - -type AllocatedTaskResources struct { - Cpu AllocatedCpuResources - Memory AllocatedMemoryResources - Networks []*NetworkResource - Devices []*AllocatedDeviceResource -} - -type AllocatedSharedResources struct { - DiskMB int64 - Networks []*NetworkResource - Ports []PortMapping -} - -type PortMapping struct { - Label string - Value int - To int - HostIP string -} - -type AllocatedCpuResources struct { - CpuShares int64 -} - -type AllocatedMemoryResources struct { - MemoryMB int64 - MemoryMaxMB int64 -} - -type AllocatedDeviceResource struct { - Vendor string - Type string - Name string - DeviceIDs []string -} - -// AllocIndexSort reverse sorts allocs by CreateIndex. -type AllocIndexSort []*AllocationListStub - -func (a AllocIndexSort) Len() int { - return len(a) -} - -func (a AllocIndexSort) Less(i, j int) bool { - return a[i].CreateIndex > a[j].CreateIndex -} - -func (a AllocIndexSort) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a Allocation) GetTaskGroup() *TaskGroup { - for _, tg := range a.Job.TaskGroups { - if *tg.Name == a.TaskGroup { - return tg - } - } - return nil -} - -// RescheduleInfo is used to calculate remaining reschedule attempts -// according to the given time and the task groups reschedule policy -func (a Allocation) RescheduleInfo(t time.Time) (int, int) { - tg := a.GetTaskGroup() - if tg == nil || tg.ReschedulePolicy == nil { - return 0, 0 - } - reschedulePolicy := tg.ReschedulePolicy - availableAttempts := *reschedulePolicy.Attempts - interval := *reschedulePolicy.Interval - attempted := 0 - - // Loop over reschedule tracker to find attempts within the restart policy's interval - if a.RescheduleTracker != nil && availableAttempts > 0 && interval > 0 { - for j := len(a.RescheduleTracker.Events) - 1; j >= 0; j-- { - lastAttempt := a.RescheduleTracker.Events[j].RescheduleTime - timeDiff := t.UTC().UnixNano() - lastAttempt - if timeDiff < interval.Nanoseconds() { - attempted += 1 - } - } - } - return attempted, availableAttempts -} - -type AllocationRestartRequest struct { - TaskName string - AllTasks bool -} - -type AllocSignalRequest struct { - Task string - Signal string -} - -// GenericResponse is used to respond to a request where no -// specific response information is needed. -type GenericResponse struct { - WriteMeta -} - -// RescheduleTracker encapsulates previous reschedule events -type RescheduleTracker struct { - Events []*RescheduleEvent -} - -// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation -type RescheduleEvent struct { - // RescheduleTime is the timestamp of a reschedule attempt - RescheduleTime int64 - - // PrevAllocID is the ID of the previous allocation being restarted - PrevAllocID string - - // PrevNodeID is the node ID of the previous allocation - PrevNodeID string -} - -// DesiredTransition is used to mark an allocation as having a desired state -// transition. This information can be used by the scheduler to make the -// correct decision. -type DesiredTransition struct { - // Migrate is used to indicate that this allocation should be stopped and - // migrated to another node. - Migrate *bool - - // Reschedule is used to indicate that this allocation is eligible to be - // rescheduled. 
- Reschedule *bool -} - -// ShouldMigrate returns whether the transition object dictates a migration. -func (d DesiredTransition) ShouldMigrate() bool { - return d.Migrate != nil && *d.Migrate -} - -// ExecStreamingIOOperation represents a stream write operation: either appending data or close (exclusively) -type ExecStreamingIOOperation struct { - Data []byte `json:"data,omitempty"` - Close bool `json:"close,omitempty"` -} - -// TerminalSize represents the size of the terminal -type TerminalSize struct { - Height int `json:"height,omitempty"` - Width int `json:"width,omitempty"` -} - -var execStreamingInputHeartbeat = ExecStreamingInput{} - -// ExecStreamingInput represents user input to be sent to nomad exec handler. -// -// At most one field should be set. -type ExecStreamingInput struct { - Stdin *ExecStreamingIOOperation `json:"stdin,omitempty"` - TTYSize *TerminalSize `json:"tty_size,omitempty"` -} - -// ExecStreamingExitResult captures the exit code of just completed nomad exec command -type ExecStreamingExitResult struct { - ExitCode int `json:"exit_code"` -} - -// ExecStreamingOutput represents an output streaming entity, e.g. stdout/stderr update or termination -// -// At most one of these fields should be set: `Stdout`, `Stderr`, or `Result`. -// If `Exited` is true, then `Result` is non-nil, and other fields are nil. -type ExecStreamingOutput struct { - Stdout *ExecStreamingIOOperation `json:"stdout,omitempty"` - Stderr *ExecStreamingIOOperation `json:"stderr,omitempty"` - - Exited bool `json:"exited,omitempty"` - Result *ExecStreamingExitResult `json:"result,omitempty"` -} - -func AllocSuffix(name string) string { - idx := strings.LastIndex(name, "[") - if idx == -1 { - return "" - } - suffix := name[idx:] - return suffix -} diff --git a/vendor/github.com/hashicorp/nomad/api/allocations_exec.go b/vendor/github.com/hashicorp/nomad/api/allocations_exec.go deleted file mode 100644 index 25f7955d..00000000 --- a/vendor/github.com/hashicorp/nomad/api/allocations_exec.go +++ /dev/null @@ -1,246 +0,0 @@ -package api - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "sync" - "time" - - "github.com/gorilla/websocket" -) - -const ( - // heartbeatInterval is the amount of time to wait between sending heartbeats - // during an exec streaming operation - heartbeatInterval = 10 * time.Second -) - -type execSession struct { - client *Client - alloc *Allocation - task string - tty bool - command []string - - stdin io.Reader - stdout io.Writer - stderr io.Writer - - terminalSizeCh <-chan TerminalSize - - q *QueryOptions -} - -func (s *execSession) run(ctx context.Context) (exitCode int, err error) { - ctx, cancelFn := context.WithCancel(ctx) - defer cancelFn() - - conn, err := s.startConnection() - if err != nil { - return -2, err - } - defer conn.Close() - - sendErrCh := s.startTransmit(ctx, conn) - exitCh, recvErrCh := s.startReceiving(ctx, conn) - - for { - select { - case <-ctx.Done(): - return -2, ctx.Err() - case exitCode := <-exitCh: - return exitCode, nil - case recvErr := <-recvErrCh: - // drop websocket code, not relevant to user - if wsErr, ok := recvErr.(*websocket.CloseError); ok && wsErr.Text != "" { - return -2, errors.New(wsErr.Text) - } - - return -2, recvErr - case sendErr := <-sendErrCh: - return -2, fmt.Errorf("failed to send input: %w", sendErr) - } - } -} - -func (s *execSession) startConnection() (*websocket.Conn, error) { - // First, attempt to connect to the node directly, but may fail due to network isolation - // and network errors. 
Fallback to using server-side forwarding instead. - nodeClient, err := s.client.GetNodeClientWithTimeout(s.alloc.NodeID, ClientConnTimeout, s.q) - if err == NodeDownErr { - return nil, NodeDownErr - } - - q := s.q - if q == nil { - q = &QueryOptions{} - } - if q.Params == nil { - q.Params = make(map[string]string) - } - - commandBytes, err := json.Marshal(s.command) - if err != nil { - return nil, fmt.Errorf("failed to marshal command: %W", err) - } - - q.Params["tty"] = strconv.FormatBool(s.tty) - q.Params["task"] = s.task - q.Params["command"] = string(commandBytes) - - reqPath := fmt.Sprintf("/v1/client/allocation/%s/exec", s.alloc.ID) - - var conn *websocket.Conn - - if nodeClient != nil { - conn, _, _ = nodeClient.websocket(reqPath, q) - } - - if conn == nil { - conn, _, err = s.client.websocket(reqPath, q) - if err != nil { - return nil, err - } - } - - return conn, nil -} - -func (s *execSession) startTransmit(ctx context.Context, conn *websocket.Conn) <-chan error { - - // FIXME: Handle websocket send errors. - // Currently, websocket write failures are dropped. As sending and - // receiving are running concurrently, it's expected that some send - // requests may fail with connection errors when connection closes. - // Connection errors should surface in the receive paths already, - // but I'm unsure about one-sided communication errors. - var sendLock sync.Mutex - send := func(v *ExecStreamingInput) { - sendLock.Lock() - defer sendLock.Unlock() - - conn.WriteJSON(v) - } - - errCh := make(chan error, 4) - - // propagate stdin - go func() { - - bytes := make([]byte, 2048) - for { - if ctx.Err() != nil { - return - } - - input := ExecStreamingInput{Stdin: &ExecStreamingIOOperation{}} - - n, err := s.stdin.Read(bytes) - - // always send data if we read some - if n != 0 { - input.Stdin.Data = bytes[:n] - send(&input) - } - - // then handle error - if err == io.EOF { - // if n != 0, send data and we'll get n = 0 on next read - if n == 0 { - input.Stdin.Close = true - send(&input) - return - } - } else if err != nil { - errCh <- err - return - } - } - }() - - // propagate terminal sizing updates - go func() { - for { - resizeInput := ExecStreamingInput{} - - select { - case <-ctx.Done(): - return - case size, ok := <-s.terminalSizeCh: - if !ok { - return - } - resizeInput.TTYSize = &size - send(&resizeInput) - } - - } - }() - - // send a heartbeat every 10 seconds - go func() { - t := time.NewTimer(heartbeatInterval) - defer t.Stop() - - for { - t.Reset(heartbeatInterval) - - select { - case <-ctx.Done(): - return - case <-t.C: - // heartbeat message - send(&execStreamingInputHeartbeat) - } - } - }() - - return errCh -} - -func (s *execSession) startReceiving(ctx context.Context, conn *websocket.Conn) (<-chan int, <-chan error) { - exitCodeCh := make(chan int, 1) - errCh := make(chan error, 1) - - go func() { - for ctx.Err() == nil { - - // Decode the next frame - var frame ExecStreamingOutput - err := conn.ReadJSON(&frame) - if websocket.IsCloseError(err, websocket.CloseNormalClosure) { - errCh <- fmt.Errorf("websocket closed before receiving exit code: %w", err) - return - } else if err != nil { - errCh <- err - return - } - - switch { - case frame.Stdout != nil: - if len(frame.Stdout.Data) != 0 { - s.stdout.Write(frame.Stdout.Data) - } - // don't really do anything if stdout is closing - case frame.Stderr != nil: - if len(frame.Stderr.Data) != 0 { - s.stderr.Write(frame.Stderr.Data) - } - // don't really do anything if stderr is closing - case frame.Exited && frame.Result != nil: - 
exitCodeCh <- frame.Result.ExitCode - return - default: - // noop - heartbeat - } - - } - - }() - - return exitCodeCh, errCh -} diff --git a/vendor/github.com/hashicorp/nomad/api/api.go b/vendor/github.com/hashicorp/nomad/api/api.go deleted file mode 100644 index 9a16a833..00000000 --- a/vendor/github.com/hashicorp/nomad/api/api.go +++ /dev/null @@ -1,1143 +0,0 @@ -package api - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/gorilla/websocket" - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-rootcerts" -) - -var ( - // ClientConnTimeout is the timeout applied when attempting to contact a - // client directly before switching to a connection through the Nomad - // server. For cluster topologies where API consumers don't have network - // access to Nomad clients, set this to a small value (ex 1ms) to avoid - // pausing on client APIs such as AllocFS. - ClientConnTimeout = 1 * time.Second -) - -const ( - // AllNamespacesNamespace is a sentinel Namespace value to indicate that api should search for - // jobs and allocations in all the namespaces the requester can access. - AllNamespacesNamespace = "*" - - // PermissionDeniedErrorContent is the string content of an error returned - // by the API which indicates the caller does not have permission to - // perform the action. - PermissionDeniedErrorContent = "Permission denied" -) - -// QueryOptions are used to parametrize a query -type QueryOptions struct { - // Providing a datacenter overwrites the region provided - // by the Config - Region string - - // Namespace is the target namespace for the query. - Namespace string - - // AllowStale allows any Nomad server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // If set, used as prefix for resource list searches - Prefix string - - // Set HTTP parameters on the query. - Params map[string]string - - // Set HTTP headers on the query. - Headers map[string]string - - // AuthToken is the secret ID of an ACL token - AuthToken string - - // Filter specifies the go-bexpr filter expression to be used for - // filtering the data prior to returning a response - Filter string - - // PerPage is the number of entries to be returned in queries that support - // paginated lists. - PerPage int32 - - // NextToken is the token used to indicate where to start paging - // for queries that support paginated lists. This token should be - // the ID of the next object after the last one seen in the - // previous response. - NextToken string - - // Reverse is used to reverse the default order of list results. - // - // Currently only supported by specific endpoints. - Reverse bool - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -// WriteOptions are used to parametrize a write -type WriteOptions struct { - // Providing a datacenter overwrites the region provided - // by the Config - Region string - - // Namespace is the target namespace for the write. 
- Namespace string - - // AuthToken is the secret ID of an ACL token - AuthToken string - - // Set HTTP headers on the query. - Headers map[string]string - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context - - // IdempotencyToken can be used to ensure the write is idempotent. - IdempotencyToken string -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex. This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration - - // NextToken is the token used to indicate where to start paging - // for queries that support paginated lists. To resume paging from - // this point, pass this token in the next request's QueryOptions - NextToken string -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // LastIndex. This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Nomad agent - Address string - - // Region to use. If not provided, the default agent region is used. - Region string - - // SecretID to use. This can be overwritten per request. - SecretID string - - // Namespace to use. If not provided the default namespace is used. - Namespace string - - // HttpClient is the client to use. Default will be used if not provided. - // - // If set, it expected to be configured for tls already, and TLSConfig is ignored. - // You may use ConfigureTLS() function to aid with initialization. - HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // TLSConfig provides the various TLS related configurations for the http - // client. - // - // TLSConfig is ignored if HttpClient is set. - TLSConfig *TLSConfig - - Headers http.Header -} - -// ClientConfig copies the configuration with a new client address, region, and -// whether the client has TLS enabled. -func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config { - scheme := "http" - if tlsEnabled { - scheme = "https" - } - config := &Config{ - Address: fmt.Sprintf("%s://%s", scheme, address), - Region: region, - Namespace: c.Namespace, - HttpClient: c.HttpClient, - SecretID: c.SecretID, - HttpAuth: c.HttpAuth, - WaitTime: c.WaitTime, - TLSConfig: c.TLSConfig.Copy(), - } - - // Update the tls server name for connecting to a client - if tlsEnabled && config.TLSConfig != nil { - config.TLSConfig.TLSServerName = fmt.Sprintf("client.%s.nomad", region) - } - - return config -} - -// TLSConfig contains the parameters needed to configure TLS on the HTTP client -// used to communicate with Nomad. 
-type TLSConfig struct { - // CACert is the path to a PEM-encoded CA cert file to use to verify the - // Nomad server SSL certificate. - CACert string - - // CAPath is the path to a directory of PEM-encoded CA cert files to verify - // the Nomad server SSL certificate. - CAPath string - - // CACertPem is the PEM-encoded CA cert to use to verify the Nomad server - // SSL certificate. - CACertPEM []byte - - // ClientCert is the path to the certificate for Nomad communication - ClientCert string - - // ClientCertPEM is the PEM-encoded certificate for Nomad communication - ClientCertPEM []byte - - // ClientKey is the path to the private key for Nomad communication - ClientKey string - - // ClientKeyPEM is the PEM-encoded private key for Nomad communication - ClientKeyPEM []byte - - // TLSServerName, if set, is used to set the SNI host when connecting via - // TLS. - TLSServerName string - - // Insecure enables or disables SSL verification - Insecure bool -} - -func (t *TLSConfig) Copy() *TLSConfig { - if t == nil { - return nil - } - - nt := new(TLSConfig) - *nt = *t - return nt -} - -func defaultHttpClient() *http.Client { - httpClient := cleanhttp.DefaultPooledClient() - transport := httpClient.Transport.(*http.Transport) - transport.TLSHandshakeTimeout = 10 * time.Second - transport.TLSClientConfig = &tls.Config{ - MinVersion: tls.VersionTLS12, - } - - // Default to http/1: alloc exec/websocket aren't supported in http/2 - // well yet: https://github.com/gorilla/websocket/issues/417 - transport.ForceAttemptHTTP2 = false - - return httpClient -} - -// DefaultConfig returns a default configuration for the client -func DefaultConfig() *Config { - config := &Config{ - Address: "http://127.0.0.1:4646", - TLSConfig: &TLSConfig{}, - } - if addr := os.Getenv("NOMAD_ADDR"); addr != "" { - config.Address = addr - } - if v := os.Getenv("NOMAD_REGION"); v != "" { - config.Region = v - } - if v := os.Getenv("NOMAD_NAMESPACE"); v != "" { - config.Namespace = v - } - if auth := os.Getenv("NOMAD_HTTP_AUTH"); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - // Read TLS specific env vars - if v := os.Getenv("NOMAD_CACERT"); v != "" { - config.TLSConfig.CACert = v - } - if v := os.Getenv("NOMAD_CAPATH"); v != "" { - config.TLSConfig.CAPath = v - } - if v := os.Getenv("NOMAD_CLIENT_CERT"); v != "" { - config.TLSConfig.ClientCert = v - } - if v := os.Getenv("NOMAD_CLIENT_KEY"); v != "" { - config.TLSConfig.ClientKey = v - } - if v := os.Getenv("NOMAD_TLS_SERVER_NAME"); v != "" { - config.TLSConfig.TLSServerName = v - } - if v := os.Getenv("NOMAD_SKIP_VERIFY"); v != "" { - if insecure, err := strconv.ParseBool(v); err == nil { - config.TLSConfig.Insecure = insecure - } - } - if v := os.Getenv("NOMAD_TOKEN"); v != "" { - config.SecretID = v - } - return config -} - -// cloneWithTimeout returns a cloned httpClient with set timeout if positive; -// otherwise, returns the same client -func cloneWithTimeout(httpClient *http.Client, t time.Duration) (*http.Client, error) { - if httpClient == nil { - return nil, errors.New("nil HTTP client") - } else if httpClient.Transport == nil { - return nil, errors.New("nil HTTP client transport") - } - - if t.Nanoseconds() < 0 { - return httpClient, nil - } - - tr, ok := httpClient.Transport.(*http.Transport) - if !ok { - return nil, 
fmt.Errorf("unexpected HTTP transport: %T", httpClient.Transport) - } - - // copy all public fields, to avoid copying transient state and locks - ntr := &http.Transport{ - Proxy: tr.Proxy, - DialContext: tr.DialContext, - Dial: tr.Dial, - DialTLS: tr.DialTLS, - TLSClientConfig: tr.TLSClientConfig, - TLSHandshakeTimeout: tr.TLSHandshakeTimeout, - DisableKeepAlives: tr.DisableKeepAlives, - DisableCompression: tr.DisableCompression, - MaxIdleConns: tr.MaxIdleConns, - MaxIdleConnsPerHost: tr.MaxIdleConnsPerHost, - MaxConnsPerHost: tr.MaxConnsPerHost, - IdleConnTimeout: tr.IdleConnTimeout, - ResponseHeaderTimeout: tr.ResponseHeaderTimeout, - ExpectContinueTimeout: tr.ExpectContinueTimeout, - TLSNextProto: tr.TLSNextProto, - ProxyConnectHeader: tr.ProxyConnectHeader, - MaxResponseHeaderBytes: tr.MaxResponseHeaderBytes, - } - - // apply timeout - ntr.DialContext = (&net.Dialer{ - Timeout: t, - KeepAlive: 30 * time.Second, - }).DialContext - - // clone http client with new transport - nc := *httpClient - nc.Transport = ntr - return &nc, nil -} - -// ConfigureTLS applies a set of TLS configurations to the the HTTP client. -func ConfigureTLS(httpClient *http.Client, tlsConfig *TLSConfig) error { - if tlsConfig == nil { - return nil - } - if httpClient == nil { - return errors.New("config HTTP Client must be set") - } - - var clientCert tls.Certificate - foundClientCert := false - if tlsConfig.ClientCert != "" || tlsConfig.ClientKey != "" { - if tlsConfig.ClientCert != "" && tlsConfig.ClientKey != "" { - var err error - clientCert, err = tls.LoadX509KeyPair(tlsConfig.ClientCert, tlsConfig.ClientKey) - if err != nil { - return err - } - foundClientCert = true - } else { - return errors.New("Both client cert and client key must be provided") - } - } else if len(tlsConfig.ClientCertPEM) != 0 || len(tlsConfig.ClientKeyPEM) != 0 { - if len(tlsConfig.ClientCertPEM) != 0 && len(tlsConfig.ClientKeyPEM) != 0 { - var err error - clientCert, err = tls.X509KeyPair(tlsConfig.ClientCertPEM, tlsConfig.ClientKeyPEM) - if err != nil { - return err - } - foundClientCert = true - } else { - return errors.New("Both client cert and client key must be provided") - } - } - - clientTLSConfig := httpClient.Transport.(*http.Transport).TLSClientConfig - rootConfig := &rootcerts.Config{ - CAFile: tlsConfig.CACert, - CAPath: tlsConfig.CAPath, - CACertificate: tlsConfig.CACertPEM, - } - if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { - return err - } - - clientTLSConfig.InsecureSkipVerify = tlsConfig.Insecure - - if foundClientCert { - clientTLSConfig.Certificates = []tls.Certificate{clientCert} - } - if tlsConfig.TLSServerName != "" { - clientTLSConfig.ServerName = tlsConfig.TLSServerName - } - - return nil -} - -// Client provides a client to the Nomad API -type Client struct { - httpClient *http.Client - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if config.Address == "" { - config.Address = defConfig.Address - } else if _, err := url.Parse(config.Address); err != nil { - return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err) - } - - httpClient := config.HttpClient - if httpClient == nil { - httpClient = defaultHttpClient() - if err := ConfigureTLS(httpClient, config.TLSConfig); err != nil { - return nil, err - } - } - - client := &Client{ - config: *config, - httpClient: httpClient, - } - return client, nil -} - -// Close closes the client's idle keep-alived 
connections. The default -// client configuration uses keep-alive to maintain connections and -// you should instantiate a single Client and reuse it for all -// requests from the same host. Connections will be closed -// automatically once the client is garbage collected. If you are -// creating multiple clients on the same host (for example, for -// testing), it may be useful to call Close() to avoid hitting -// connection limits. -func (c *Client) Close() { - c.httpClient.CloseIdleConnections() -} - -// Address return the address of the Nomad agent -func (c *Client) Address() string { - return c.config.Address -} - -// SetRegion sets the region to forward API requests to. -func (c *Client) SetRegion(region string) { - c.config.Region = region -} - -// SetNamespace sets the namespace to forward API requests to. -func (c *Client) SetNamespace(namespace string) { - c.config.Namespace = namespace -} - -// GetNodeClient returns a new Client that will dial the specified node. If the -// QueryOptions is set, its region will be used. -func (c *Client) GetNodeClient(nodeID string, q *QueryOptions) (*Client, error) { - return c.getNodeClientImpl(nodeID, -1, q, c.Nodes().Info) -} - -// GetNodeClientWithTimeout returns a new Client that will dial the specified -// node using the specified timeout. If the QueryOptions is set, its region will -// be used. -func (c *Client) GetNodeClientWithTimeout( - nodeID string, timeout time.Duration, q *QueryOptions) (*Client, error) { - return c.getNodeClientImpl(nodeID, timeout, q, c.Nodes().Info) -} - -// nodeLookup is the definition of a function used to lookup a node. This is -// largely used to mock the lookup in tests. -type nodeLookup func(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error) - -// getNodeClientImpl is the implementation of creating a API client for -// contacting a node. It takes a function to lookup the node such that it can be -// mocked during tests. -func (c *Client) getNodeClientImpl(nodeID string, timeout time.Duration, q *QueryOptions, lookup nodeLookup) (*Client, error) { - node, _, err := lookup(nodeID, q) - if err != nil { - return nil, err - } - if node.Status == "down" { - return nil, NodeDownErr - } - if node.HTTPAddr == "" { - return nil, fmt.Errorf("http addr of node %q (%s) is not advertised", node.Name, nodeID) - } - - var region string - switch { - case q != nil && q.Region != "": - // Prefer the region set in the query parameter - region = q.Region - case c.config.Region != "": - // If the client is configured for a particular region use that - region = c.config.Region - default: - // No region information is given so use GlobalRegion as the default. - region = GlobalRegion - } - - // Get an API client for the node - conf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled) - - // set timeout - preserve old behavior where errors are ignored and use untimed one - httpClient, err := cloneWithTimeout(c.httpClient, timeout) - // on error, fallback to using current http client - if err != nil { - httpClient = c.httpClient - } - conf.HttpClient = httpClient - - return NewClient(conf) -} - -// SetSecretID sets the ACL token secret for API requests. 
-func (c *Client) SetSecretID(secretID string) { - c.config.SecretID = secretID -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - token string - body io.Reader - obj interface{} - ctx context.Context - header http.Header -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Region != "" { - r.params.Set("region", q.Region) - } - if q.Namespace != "" { - r.params.Set("namespace", q.Namespace) - } - if q.AuthToken != "" { - r.token = q.AuthToken - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.Prefix != "" { - r.params.Set("prefix", q.Prefix) - } - if q.Filter != "" { - r.params.Set("filter", q.Filter) - } - if q.PerPage != 0 { - r.params.Set("per_page", fmt.Sprint(q.PerPage)) - } - if q.NextToken != "" { - r.params.Set("next_token", q.NextToken) - } - if q.Reverse { - r.params.Set("reverse", "true") - } - for k, v := range q.Params { - r.params.Set(k, v) - } - r.ctx = q.Context() - - for k, v := range q.Headers { - r.header.Set(k, v) - } -} - -// durToMsec converts a duration to a millisecond specified string -func durToMsec(dur time.Duration) string { - return fmt.Sprintf("%dms", dur/time.Millisecond) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Region != "" { - r.params.Set("region", q.Region) - } - if q.Namespace != "" { - r.params.Set("namespace", q.Namespace) - } - if q.AuthToken != "" { - r.token = q.AuthToken - } - if q.IdempotencyToken != "" { - r.params.Set("idempotency_token", q.IdempotencyToken) - } - r.ctx = q.Context() - - for k, v := range q.Headers { - r.header.Set(k, v) - } -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - if b, err := encodeBody(r.obj); err != nil { - return nil, err - } else { - r.body = b - } - } - - ctx := func() context.Context { - if r.ctx != nil { - return r.ctx - } - return context.Background() - }() - - // Create the HTTP request - req, err := http.NewRequestWithContext(ctx, r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.Header = r.header - - // Optionally configure HTTP basic authentication - if r.url.User != nil { - username := r.url.User.Username() - password, _ := r.url.User.Password() - req.SetBasicAuth(username, password) - } else if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - - req.Header.Add("Accept-Encoding", "gzip") - if r.token != "" { - req.Header.Set("X-Nomad-Token", r.token) - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) (*request, error) { - base, _ := url.Parse(c.config.Address) - u, err := url.Parse(path) - if err != nil { - return nil, err - } - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: base.Scheme, - 
User: base.User, - Host: base.Host, - Path: u.Path, - RawPath: u.RawPath, - }, - header: make(http.Header), - params: make(map[string][]string), - } - if c.config.Region != "" { - r.params.Set("region", c.config.Region) - } - if c.config.Namespace != "" { - r.params.Set("namespace", c.config.Namespace) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.SecretID != "" { - r.token = r.config.SecretID - } - - // Add in the query parameters, if any - for key, values := range u.Query() { - for _, value := range values { - r.params.Add(key, value) - } - } - - for key, values := range c.config.Headers { - r.header[key] = values - } - - return r, nil -} - -// multiCloser is to wrap a ReadCloser such that when close is called, multiple -// Closes occur. -type multiCloser struct { - reader io.Reader - inorderClose []io.Closer -} - -func (m *multiCloser) Close() error { - for _, c := range m.inorderClose { - if err := c.Close(); err != nil { - return err - } - } - return nil -} - -func (m *multiCloser) Read(p []byte) (int, error) { - return m.reader.Read(p) -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - - start := time.Now() - resp, err := c.httpClient.Do(req) - diff := time.Since(start) - - // If the response is compressed, we swap the body's reader. - if zipErr := c.autoUnzip(resp); zipErr != nil { - return 0, nil, zipErr - } - - return diff, resp, err -} - -// autoUnzip modifies resp in-place, wrapping the response body with a gzip -// reader if the Content-Encoding of the response is "gzip". -func (*Client) autoUnzip(resp *http.Response) error { - if resp == nil || resp.Header == nil { - return nil - } - - if resp.Header.Get("Content-Encoding") == "gzip" { - zReader, err := gzip.NewReader(resp.Body) - if err == io.EOF { - // zero length response, do not wrap - return nil - } else if err != nil { - // some other error (e.g. corrupt) - return err - } - - // The gzip reader does not close an underlying reader, so use a - // multiCloser to make sure response body does get closed. - resp.Body = &multiCloser{ - reader: zReader, - inorderClose: []io.Closer{zReader, resp.Body}, - } - } - - return nil -} - -// rawQuery makes a GET request to the specified endpoint but returns just the -// response body. 
-func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, error) { - r, err := c.newRequest("GET", endpoint) - if err != nil { - return nil, err - } - r.setQueryOptions(q) - _, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - - return resp.Body, nil -} - -// websocket makes a websocket request to the specific endpoint -func (c *Client) websocket(endpoint string, q *QueryOptions) (*websocket.Conn, *http.Response, error) { - - transport, ok := c.httpClient.Transport.(*http.Transport) - if !ok { - return nil, nil, errors.New("unsupported transport") - } - dialer := websocket.Dialer{ - ReadBufferSize: 4096, - WriteBufferSize: 4096, - HandshakeTimeout: c.httpClient.Timeout, - - // values to inherit from http client configuration - NetDial: transport.Dial, - NetDialContext: transport.DialContext, - Proxy: transport.Proxy, - TLSClientConfig: transport.TLSClientConfig, - } - - // build request object for header and parameters - r, err := c.newRequest("GET", endpoint) - if err != nil { - return nil, nil, err - } - r.setQueryOptions(q) - - rhttp, err := r.toHTTP() - if err != nil { - return nil, nil, err - } - - // convert scheme - wsScheme := "" - switch rhttp.URL.Scheme { - case "http": - wsScheme = "ws" - case "https": - wsScheme = "wss" - default: - return nil, nil, fmt.Errorf("unsupported scheme: %v", rhttp.URL.Scheme) - } - rhttp.URL.Scheme = wsScheme - - conn, resp, err := dialer.Dial(rhttp.URL.String(), rhttp.Header) - - // check resp status code, as it's more informative than handshake error we get from ws library - if resp != nil && resp.StatusCode != 101 { - var buf bytes.Buffer - - if resp.Header.Get("Content-Encoding") == "gzip" { - greader, err := gzip.NewReader(resp.Body) - if err != nil { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - io.Copy(&buf, greader) - } else { - io.Copy(&buf, resp.Body) - } - resp.Body.Close() - - return nil, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - - return conn, resp, err -} - -// query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Nomad conventions. -func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r, err := c.newRequest("GET", endpoint) - if err != nil { - return nil, err - } - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// putQuery is used to do a PUT request when doing a read against an endpoint -// and deserialize the response into an interface using standard Nomad -// conventions. -func (c *Client) putQuery(endpoint string, in, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r, err := c.newRequest("PUT", endpoint) - if err != nil { - return nil, err - } - r.setQueryOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Nomad conventions. 
-func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r, err := c.newRequest("PUT", endpoint) - if err != nil { - return nil, err - } - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - parseWriteMeta(resp, wm) - - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } - return wm, nil -} - -// delete is used to do a DELETE request against an endpoint and -// serialize/deserialized using the standard Nomad conventions. -func (c *Client) delete(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r, err := c.newRequest("DELETE", endpoint) - if err != nil { - return nil, err - } - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - parseWriteMeta(resp, wm) - - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Nomad-Index - index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err) - } - q.LastIndex = index - - // Parse the X-Nomad-LastContact - last, err := strconv.ParseUint(header.Get("X-Nomad-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - q.NextToken = header.Get("X-Nomad-NextToken") - - // Parse the X-Nomad-KnownLeader - switch header.Get("X-Nomad-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - return nil -} - -// parseWriteMeta is used to help parse write meta-data -func parseWriteMeta(resp *http.Response, q *WriteMeta) error { - header := resp.Header - - // Parse the X-Nomad-Index - index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err) - } - q.LastIndex = index - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - switch resp.ContentLength { - case 0: - if out == nil { - return nil - } - return errors.New("Got 0 byte response with non-nil decode object") - default: - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) - } -} - -// encodeBody prepares the reader to serve as the request body. -// -// Returns the `obj` input if it is a raw io.Reader object; otherwise -// returns a reader of the json format of the passed argument. 
-func encodeBody(obj interface{}) (io.Reader, error) { - if reader, ok := obj.(io.Reader); ok { - return reader, nil - } - - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - _, _ = io.Copy(&buf, resp.Body) - _ = resp.Body.Close() - body := strings.TrimSpace(buf.String()) - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, body) - } - return d, resp, nil -} - -// Context returns the context used for canceling HTTP requests related to this query -func (o *QueryOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -// WithContext creates a copy of the query options using the provided context to cancel related HTTP requests -func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { - o2 := new(QueryOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// Context returns the context used for canceling HTTP requests related to this write -func (o *WriteOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -// WithContext creates a copy of the write options using the provided context to cancel related HTTP requests -func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { - o2 := new(WriteOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} diff --git a/vendor/github.com/hashicorp/nomad/api/constraint.go b/vendor/github.com/hashicorp/nomad/api/constraint.go deleted file mode 100644 index 7213270e..00000000 --- a/vendor/github.com/hashicorp/nomad/api/constraint.go +++ /dev/null @@ -1,30 +0,0 @@ -package api - -const ( - ConstraintDistinctProperty = "distinct_property" - ConstraintDistinctHosts = "distinct_hosts" - ConstraintRegex = "regexp" - ConstraintVersion = "version" - ConstraintSemver = "semver" - ConstraintSetContains = "set_contains" - ConstraintSetContainsAll = "set_contains_all" - ConstraintSetContainsAny = "set_contains_any" - ConstraintAttributeIsSet = "is_set" - ConstraintAttributeIsNotSet = "is_not_set" -) - -// Constraint is used to serialize a job placement constraint. -type Constraint struct { - LTarget string `hcl:"attribute,optional"` - RTarget string `hcl:"value,optional"` - Operand string `hcl:"operator,optional"` -} - -// NewConstraint generates a new job placement constraint. -func NewConstraint(left, operand, right string) *Constraint { - return &Constraint{ - LTarget: left, - RTarget: right, - Operand: operand, - } -} diff --git a/vendor/github.com/hashicorp/nomad/api/consul.go b/vendor/github.com/hashicorp/nomad/api/consul.go deleted file mode 100644 index 9a76bfb3..00000000 --- a/vendor/github.com/hashicorp/nomad/api/consul.go +++ /dev/null @@ -1,607 +0,0 @@ -package api - -import ( - "time" -) - -// Consul represents configuration related to consul. -type Consul struct { - // (Enterprise-only) Namespace represents a Consul namespace. - Namespace string `mapstructure:"namespace" hcl:"namespace,optional"` -} - -// Canonicalize Consul into a canonical form. The Canonicalize structs containing -// a Consul should ensure it is not nil. 
-func (c *Consul) Canonicalize() { - // Nothing to do here. - // - // If Namespace is nil, that is a choice of the job submitter that - // we should inherit from higher up (i.e. job<-group). Likewise, if - // Namespace is set but empty, that is a choice to use the default consul - // namespace. -} - -// Copy creates a deep copy of c. -func (c *Consul) Copy() *Consul { - return &Consul{ - Namespace: c.Namespace, - } -} - -// MergeNamespace sets Namespace to namespace if not already configured. -// This is used to inherit the job-level consul_namespace if the group-level -// namespace is not explicitly configured. -func (c *Consul) MergeNamespace(namespace *string) { - // only inherit namespace from above if not already set - if c.Namespace == "" && namespace != nil { - c.Namespace = *namespace - } -} - -// ConsulConnect represents a Consul Connect jobspec stanza. -type ConsulConnect struct { - Native bool `hcl:"native,optional"` - Gateway *ConsulGateway `hcl:"gateway,block"` - SidecarService *ConsulSidecarService `mapstructure:"sidecar_service" hcl:"sidecar_service,block"` - SidecarTask *SidecarTask `mapstructure:"sidecar_task" hcl:"sidecar_task,block"` -} - -func (cc *ConsulConnect) Canonicalize() { - if cc == nil { - return - } - - cc.SidecarService.Canonicalize() - cc.SidecarTask.Canonicalize() - cc.Gateway.Canonicalize() -} - -// ConsulSidecarService represents a Consul Connect SidecarService jobspec -// stanza. -type ConsulSidecarService struct { - Tags []string `hcl:"tags,optional"` - Port string `hcl:"port,optional"` - Proxy *ConsulProxy `hcl:"proxy,block"` - DisableDefaultTCPCheck bool `mapstructure:"disable_default_tcp_check" hcl:"disable_default_tcp_check,optional"` -} - -func (css *ConsulSidecarService) Canonicalize() { - if css == nil { - return - } - - if len(css.Tags) == 0 { - css.Tags = nil - } - - css.Proxy.Canonicalize() -} - -// SidecarTask represents a subset of Task fields that can be set to override -// the fields of the Task generated for the sidecar -type SidecarTask struct { - Name string `hcl:"name,optional"` - Driver string `hcl:"driver,optional"` - User string `hcl:"user,optional"` - Config map[string]interface{} `hcl:"config,block"` - Env map[string]string `hcl:"env,block"` - Resources *Resources `hcl:"resources,block"` - Meta map[string]string `hcl:"meta,block"` - KillTimeout *time.Duration `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"` - LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"` - ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` - KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"` -} - -func (st *SidecarTask) Canonicalize() { - if st == nil { - return - } - - if len(st.Config) == 0 { - st.Config = nil - } - - if len(st.Env) == 0 { - st.Env = nil - } - - if st.Resources == nil { - st.Resources = DefaultResources() - } else { - st.Resources.Canonicalize() - } - - if st.LogConfig == nil { - st.LogConfig = DefaultLogConfig() - } else { - st.LogConfig.Canonicalize() - } - - if len(st.Meta) == 0 { - st.Meta = nil - } - - if st.KillTimeout == nil { - st.KillTimeout = pointerOf(5 * time.Second) - } - - if st.ShutdownDelay == nil { - st.ShutdownDelay = pointerOf(time.Duration(0)) - } -} - -// ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza. 
-type ConsulProxy struct { - LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"` - LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"` - ExposeConfig *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"` - Upstreams []*ConsulUpstream `hcl:"upstreams,block"` - Config map[string]interface{} `hcl:"config,block"` -} - -func (cp *ConsulProxy) Canonicalize() { - if cp == nil { - return - } - - cp.ExposeConfig.Canonicalize() - - if len(cp.Upstreams) == 0 { - cp.Upstreams = nil - } - - for _, upstream := range cp.Upstreams { - upstream.Canonicalize() - } - - if len(cp.Config) == 0 { - cp.Config = nil - } -} - -// ConsulMeshGateway is used to configure mesh gateway usage when connecting to -// a connect upstream in another datacenter. -type ConsulMeshGateway struct { - // Mode configures how an upstream should be accessed with regard to using - // mesh gateways. - // - // local - the connect proxy makes outbound connections through mesh gateway - // originating in the same datacenter. - // - // remote - the connect proxy makes outbound connections to a mesh gateway - // in the destination datacenter. - // - // none (default) - no mesh gateway is used, the proxy makes outbound connections - // directly to destination services. - // - // https://www.consul.io/docs/connect/gateways/mesh-gateway#modes-of-operation - Mode string `mapstructure:"mode" hcl:"mode,optional"` -} - -func (c *ConsulMeshGateway) Canonicalize() { - // Mode may be empty string, indicating behavior will defer to Consul - // service-defaults config entry. - return -} - -func (c *ConsulMeshGateway) Copy() *ConsulMeshGateway { - if c == nil { - return nil - } - - return &ConsulMeshGateway{ - Mode: c.Mode, - } -} - -// ConsulUpstream represents a Consul Connect upstream jobspec stanza. -type ConsulUpstream struct { - DestinationName string `mapstructure:"destination_name" hcl:"destination_name,optional"` - DestinationNamespace string `mapstructure:"destination_namespace" hcl:"destination_namespace,optional"` - LocalBindPort int `mapstructure:"local_bind_port" hcl:"local_bind_port,optional"` - Datacenter string `mapstructure:"datacenter" hcl:"datacenter,optional"` - LocalBindAddress string `mapstructure:"local_bind_address" hcl:"local_bind_address,optional"` - MeshGateway *ConsulMeshGateway `mapstructure:"mesh_gateway" hcl:"mesh_gateway,block"` -} - -func (cu *ConsulUpstream) Copy() *ConsulUpstream { - if cu == nil { - return nil - } - return &ConsulUpstream{ - DestinationName: cu.DestinationName, - DestinationNamespace: cu.DestinationNamespace, - LocalBindPort: cu.LocalBindPort, - Datacenter: cu.Datacenter, - LocalBindAddress: cu.LocalBindAddress, - MeshGateway: cu.MeshGateway.Copy(), - } -} - -func (cu *ConsulUpstream) Canonicalize() { - if cu == nil { - return - } - cu.MeshGateway.Canonicalize() -} - -type ConsulExposeConfig struct { - Path []*ConsulExposePath `mapstructure:"path" hcl:"path,block"` -} - -func (cec *ConsulExposeConfig) Canonicalize() { - if cec == nil { - return - } - - if len(cec.Path) == 0 { - cec.Path = nil - } -} - -type ConsulExposePath struct { - Path string `hcl:"path,optional"` - Protocol string `hcl:"protocol,optional"` - LocalPathPort int `mapstructure:"local_path_port" hcl:"local_path_port,optional"` - ListenerPort string `mapstructure:"listener_port" hcl:"listener_port,optional"` -} - -// ConsulGateway is used to configure one of the Consul Connect Gateway types. 
-type ConsulGateway struct { - // Proxy is used to configure the Envoy instance acting as the gateway. - Proxy *ConsulGatewayProxy `hcl:"proxy,block"` - - // Ingress represents the Consul Configuration Entry for an Ingress Gateway. - Ingress *ConsulIngressConfigEntry `hcl:"ingress,block"` - - // Terminating represents the Consul Configuration Entry for a Terminating Gateway. - Terminating *ConsulTerminatingConfigEntry `hcl:"terminating,block"` - - // Mesh indicates the Consul service should be a Mesh Gateway. - Mesh *ConsulMeshConfigEntry `hcl:"mesh,block"` -} - -func (g *ConsulGateway) Canonicalize() { - if g == nil { - return - } - g.Proxy.Canonicalize() - g.Ingress.Canonicalize() - g.Terminating.Canonicalize() -} - -func (g *ConsulGateway) Copy() *ConsulGateway { - if g == nil { - return nil - } - - return &ConsulGateway{ - Proxy: g.Proxy.Copy(), - Ingress: g.Ingress.Copy(), - Terminating: g.Terminating.Copy(), - } -} - -type ConsulGatewayBindAddress struct { - Name string `hcl:",label"` - Address string `mapstructure:"address" hcl:"address,optional"` - Port int `mapstructure:"port" hcl:"port,optional"` -} - -var ( - // defaultGatewayConnectTimeout is the default amount of time connections to - // upstreams are allowed before timing out. - defaultGatewayConnectTimeout = 5 * time.Second -) - -// ConsulGatewayProxy is used to tune parameters of the proxy instance acting as -// one of the forms of Connect gateways that Consul supports. -// -// https://www.consul.io/docs/connect/proxies/envoy#gateway-options -type ConsulGatewayProxy struct { - ConnectTimeout *time.Duration `mapstructure:"connect_timeout" hcl:"connect_timeout,optional"` - EnvoyGatewayBindTaggedAddresses bool `mapstructure:"envoy_gateway_bind_tagged_addresses" hcl:"envoy_gateway_bind_tagged_addresses,optional"` - EnvoyGatewayBindAddresses map[string]*ConsulGatewayBindAddress `mapstructure:"envoy_gateway_bind_addresses" hcl:"envoy_gateway_bind_addresses,block"` - EnvoyGatewayNoDefaultBind bool `mapstructure:"envoy_gateway_no_default_bind" hcl:"envoy_gateway_no_default_bind,optional"` - EnvoyDNSDiscoveryType string `mapstructure:"envoy_dns_discovery_type" hcl:"envoy_dns_discovery_type,optional"` - Config map[string]interface{} `hcl:"config,block"` // escape hatch envoy config -} - -func (p *ConsulGatewayProxy) Canonicalize() { - if p == nil { - return - } - - if p.ConnectTimeout == nil { - // same as the default from consul - p.ConnectTimeout = pointerOf(defaultGatewayConnectTimeout) - } - - if len(p.EnvoyGatewayBindAddresses) == 0 { - p.EnvoyGatewayBindAddresses = nil - } - - if len(p.Config) == 0 { - p.Config = nil - } -} - -func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { - if p == nil { - return nil - } - - var binds map[string]*ConsulGatewayBindAddress = nil - if p.EnvoyGatewayBindAddresses != nil { - binds = make(map[string]*ConsulGatewayBindAddress, len(p.EnvoyGatewayBindAddresses)) - for k, v := range p.EnvoyGatewayBindAddresses { - binds[k] = v - } - } - - var config map[string]interface{} = nil - if p.Config != nil { - config = make(map[string]interface{}, len(p.Config)) - for k, v := range p.Config { - config[k] = v - } - } - - return &ConsulGatewayProxy{ - ConnectTimeout: pointerOf(*p.ConnectTimeout), - EnvoyGatewayBindTaggedAddresses: p.EnvoyGatewayBindTaggedAddresses, - EnvoyGatewayBindAddresses: binds, - EnvoyGatewayNoDefaultBind: p.EnvoyGatewayNoDefaultBind, - EnvoyDNSDiscoveryType: p.EnvoyDNSDiscoveryType, - Config: config, - } -} - -// ConsulGatewayTLSConfig is used to configure TLS for a gateway. 
-type ConsulGatewayTLSConfig struct { - Enabled bool `hcl:"enabled,optional"` - TLSMinVersion string `hcl:"tls_min_version,optional" mapstructure:"tls_min_version"` - TLSMaxVersion string `hcl:"tls_max_version,optional" mapstructure:"tls_max_version"` - CipherSuites []string `hcl:"cipher_suites,optional" mapstructure:"cipher_suites"` -} - -func (tc *ConsulGatewayTLSConfig) Canonicalize() { -} - -func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig { - if tc == nil { - return nil - } - - result := &ConsulGatewayTLSConfig{ - Enabled: tc.Enabled, - TLSMinVersion: tc.TLSMinVersion, - TLSMaxVersion: tc.TLSMaxVersion, - } - if len(tc.CipherSuites) != 0 { - cipherSuites := make([]string, len(tc.CipherSuites)) - copy(cipherSuites, tc.CipherSuites) - result.CipherSuites = cipherSuites - } - - return result -} - -// ConsulIngressService is used to configure a service fronted by the ingress gateway. -type ConsulIngressService struct { - // Namespace is not yet supported. - // Namespace string - Name string `hcl:"name,optional"` - - Hosts []string `hcl:"hosts,optional"` -} - -func (s *ConsulIngressService) Canonicalize() { - if s == nil { - return - } - - if len(s.Hosts) == 0 { - s.Hosts = nil - } -} - -func (s *ConsulIngressService) Copy() *ConsulIngressService { - if s == nil { - return nil - } - - var hosts []string = nil - if n := len(s.Hosts); n > 0 { - hosts = make([]string, n) - copy(hosts, s.Hosts) - } - - return &ConsulIngressService{ - Name: s.Name, - Hosts: hosts, - } -} - -const ( - defaultIngressListenerProtocol = "tcp" -) - -// ConsulIngressListener is used to configure a listener on a Consul Ingress -// Gateway. -type ConsulIngressListener struct { - Port int `hcl:"port,optional"` - Protocol string `hcl:"protocol,optional"` - Services []*ConsulIngressService `hcl:"service,block"` -} - -func (l *ConsulIngressListener) Canonicalize() { - if l == nil { - return - } - - if l.Protocol == "" { - // same as default from consul - l.Protocol = defaultIngressListenerProtocol - } - - if len(l.Services) == 0 { - l.Services = nil - } -} - -func (l *ConsulIngressListener) Copy() *ConsulIngressListener { - if l == nil { - return nil - } - - var services []*ConsulIngressService = nil - if n := len(l.Services); n > 0 { - services = make([]*ConsulIngressService, n) - for i := 0; i < n; i++ { - services[i] = l.Services[i].Copy() - } - } - - return &ConsulIngressListener{ - Port: l.Port, - Protocol: l.Protocol, - Services: services, - } -} - -// ConsulIngressConfigEntry represents the Consul Configuration Entry type for -// an Ingress Gateway. -// -// https://www.consul.io/docs/agent/config-entries/ingress-gateway#available-fields -type ConsulIngressConfigEntry struct { - // Namespace is not yet supported. 
- // Namespace string - - TLS *ConsulGatewayTLSConfig `hcl:"tls,block"` - Listeners []*ConsulIngressListener `hcl:"listener,block"` -} - -func (e *ConsulIngressConfigEntry) Canonicalize() { - if e == nil { - return - } - - e.TLS.Canonicalize() - - if len(e.Listeners) == 0 { - e.Listeners = nil - } - - for _, listener := range e.Listeners { - listener.Canonicalize() - } -} - -func (e *ConsulIngressConfigEntry) Copy() *ConsulIngressConfigEntry { - if e == nil { - return nil - } - - var listeners []*ConsulIngressListener = nil - if n := len(e.Listeners); n > 0 { - listeners = make([]*ConsulIngressListener, n) - for i := 0; i < n; i++ { - listeners[i] = e.Listeners[i].Copy() - } - } - - return &ConsulIngressConfigEntry{ - TLS: e.TLS.Copy(), - Listeners: listeners, - } -} - -type ConsulLinkedService struct { - Name string `hcl:"name,optional"` - CAFile string `hcl:"ca_file,optional" mapstructure:"ca_file"` - CertFile string `hcl:"cert_file,optional" mapstructure:"cert_file"` - KeyFile string `hcl:"key_file,optional" mapstructure:"key_file"` - SNI string `hcl:"sni,optional"` -} - -func (s *ConsulLinkedService) Canonicalize() { - // nothing to do for now -} - -func (s *ConsulLinkedService) Copy() *ConsulLinkedService { - if s == nil { - return nil - } - - return &ConsulLinkedService{ - Name: s.Name, - CAFile: s.CAFile, - CertFile: s.CertFile, - KeyFile: s.KeyFile, - SNI: s.SNI, - } -} - -// ConsulTerminatingConfigEntry represents the Consul Configuration Entry type -// for a Terminating Gateway. -// -// https://www.consul.io/docs/agent/config-entries/terminating-gateway#available-fields -type ConsulTerminatingConfigEntry struct { - // Namespace is not yet supported. - // Namespace string - - Services []*ConsulLinkedService `hcl:"service,block"` -} - -func (e *ConsulTerminatingConfigEntry) Canonicalize() { - if e == nil { - return - } - - if len(e.Services) == 0 { - e.Services = nil - } - - for _, service := range e.Services { - service.Canonicalize() - } -} - -func (e *ConsulTerminatingConfigEntry) Copy() *ConsulTerminatingConfigEntry { - if e == nil { - return nil - } - - var services []*ConsulLinkedService = nil - if n := len(e.Services); n > 0 { - services = make([]*ConsulLinkedService, n) - for i := 0; i < n; i++ { - services[i] = e.Services[i].Copy() - } - } - - return &ConsulTerminatingConfigEntry{ - Services: services, - } -} - -// ConsulMeshConfigEntry is a stub used to represent that the gateway service type -// should be for a Mesh Gateway. Unlike Ingress and Terminating, there is no -// actual Consul Config Entry type for mesh-gateway, at least for now. We still -// create a type for future proofing, instead just using a bool for example. -type ConsulMeshConfigEntry struct { - // nothing in here -} - -func (e *ConsulMeshConfigEntry) Canonicalize() { - return -} - -func (e *ConsulMeshConfigEntry) Copy() *ConsulMeshConfigEntry { - if e == nil { - return nil - } - return new(ConsulMeshConfigEntry) -} diff --git a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go deleted file mode 100644 index f189c0de..00000000 --- a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package contexts provides constants used with the Nomad Search API. -package contexts - -// Context defines the scope in which a search for Nomad object operates. -type Context string - -const ( - // These Context types are used to reference the high level Nomad object - // types than can be searched. 
- Allocs Context = "allocs" - Deployments Context = "deployment" - Evals Context = "evals" - Jobs Context = "jobs" - Nodes Context = "nodes" - Namespaces Context = "namespaces" - Quotas Context = "quotas" - Recommendations Context = "recommendations" - ScalingPolicies Context = "scaling_policy" - Plugins Context = "plugins" - Variables Context = "vars" - Volumes Context = "volumes" - - // These Context types are used to associate a search result from a lower - // level Nomad object with one of the higher level Context types above. - Groups Context = "groups" - Services Context = "services" - Tasks Context = "tasks" - Images Context = "images" - Commands Context = "commands" - Classes Context = "classes" - - // Context used to represent the set of all the higher level Context types. - All Context = "all" -) diff --git a/vendor/github.com/hashicorp/nomad/api/csi.go b/vendor/github.com/hashicorp/nomad/api/csi.go deleted file mode 100644 index dc31db8f..00000000 --- a/vendor/github.com/hashicorp/nomad/api/csi.go +++ /dev/null @@ -1,597 +0,0 @@ -package api - -import ( - "fmt" - "net/url" - "sort" - "strings" - "time" -) - -// CSIVolumes is used to access Container Storage Interface (CSI) endpoints. -type CSIVolumes struct { - client *Client -} - -// CSIVolumes returns a handle on the CSIVolumes endpoint. -func (c *Client) CSIVolumes() *CSIVolumes { - return &CSIVolumes{client: c} -} - -// List returns all CSI volumes. -func (v *CSIVolumes) List(q *QueryOptions) ([]*CSIVolumeListStub, *QueryMeta, error) { - var resp []*CSIVolumeListStub - qm, err := v.client.query("/v1/volumes?type=csi", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(CSIVolumeIndexSort(resp)) - return resp, qm, nil -} - -// ListExternal returns all CSI volumes, as understood by the external storage -// provider. These volumes may or may not be currently registered with Nomad. -// The response is paginated by the plugin and accepts the -// QueryOptions.PerPage and QueryOptions.NextToken fields. -func (v *CSIVolumes) ListExternal(pluginID string, q *QueryOptions) (*CSIVolumeListExternalResponse, *QueryMeta, error) { - var resp *CSIVolumeListExternalResponse - - qp := url.Values{} - qp.Set("plugin_id", pluginID) - if q.NextToken != "" { - qp.Set("next_token", q.NextToken) - } - if q.PerPage != 0 { - qp.Set("per_page", fmt.Sprint(q.PerPage)) - } - - qm, err := v.client.query("/v1/volumes/external?"+qp.Encode(), &resp, q) - if err != nil { - return nil, nil, err - } - - sort.Sort(CSIVolumeExternalStubSort(resp.Volumes)) - return resp, qm, nil -} - -// PluginList returns all CSI volumes for the specified plugin id -func (v *CSIVolumes) PluginList(pluginID string) ([]*CSIVolumeListStub, *QueryMeta, error) { - return v.List(&QueryOptions{Prefix: pluginID}) -} - -// Info is used to retrieve a single CSIVolume -func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, error) { - var resp CSIVolume - qm, err := v.client.query("/v1/volume/csi/"+id, &resp, q) - if err != nil { - return nil, nil, err - } - - return &resp, qm, nil -} - -// Register registers a single CSIVolume with Nomad. The volume must already -// exist in the external storage provider. -func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, error) { - req := CSIVolumeRegisterRequest{ - Volumes: []*CSIVolume{vol}, - } - meta, err := v.client.write("/v1/volume/csi/"+vol.ID, req, nil, w) - return meta, err -} - -// Deregister deregisters a single CSIVolume from Nomad. 
The volume will not be deleted from the external storage provider. -func (v *CSIVolumes) Deregister(id string, force bool, w *WriteOptions) error { - _, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v?force=%t", url.PathEscape(id), force), nil, nil, w) - return err -} - -// Create creates a single CSIVolume in an external storage provider and -// registers it with Nomad. You do not need to call Register if this call is -// successful. -func (v *CSIVolumes) Create(vol *CSIVolume, w *WriteOptions) ([]*CSIVolume, *WriteMeta, error) { - req := CSIVolumeCreateRequest{ - Volumes: []*CSIVolume{vol}, - } - - resp := &CSIVolumeCreateResponse{} - meta, err := v.client.write(fmt.Sprintf("/v1/volume/csi/%v/create", vol.ID), req, resp, w) - return resp.Volumes, meta, err -} - -// DEPRECATED: will be removed in Nomad 1.4.0 -// Delete deletes a CSI volume from an external storage provider. The ID -// passed as an argument here is for the storage provider's ID, so a volume -// that's already been deregistered can be deleted. -func (v *CSIVolumes) Delete(externalVolID string, w *WriteOptions) error { - _, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(externalVolID)), nil, nil, w) - return err -} - -// DeleteOpts deletes a CSI volume from an external storage -// provider. The ID passed in the request is for the storage -// provider's ID, so a volume that's already been deregistered can be -// deleted. -func (v *CSIVolumes) DeleteOpts(req *CSIVolumeDeleteRequest, w *WriteOptions) error { - if w == nil { - w = &WriteOptions{} - } - w.SetHeadersFromCSISecrets(req.Secrets) - _, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/delete", url.PathEscape(req.ExternalVolumeID)), nil, nil, w) - return err -} - -// Detach causes Nomad to attempt to detach a CSI volume from a client -// node. This is used in the case that the node is temporarily lost and the -// allocations are unable to drop their claims automatically. -func (v *CSIVolumes) Detach(volID, nodeID string, w *WriteOptions) error { - _, err := v.client.delete(fmt.Sprintf("/v1/volume/csi/%v/detach?node=%v", url.PathEscape(volID), nodeID), nil, nil, w) - return err -} - -// CreateSnapshot snapshots an external storage volume. -func (v *CSIVolumes) CreateSnapshot(snap *CSISnapshot, w *WriteOptions) (*CSISnapshotCreateResponse, *WriteMeta, error) { - req := &CSISnapshotCreateRequest{ - Snapshots: []*CSISnapshot{snap}, - } - if w == nil { - w = &WriteOptions{} - } - w.SetHeadersFromCSISecrets(snap.Secrets) - resp := &CSISnapshotCreateResponse{} - meta, err := v.client.write("/v1/volumes/snapshot", req, resp, w) - return resp, meta, err -} - -// DeleteSnapshot deletes an external storage volume snapshot. -func (v *CSIVolumes) DeleteSnapshot(snap *CSISnapshot, w *WriteOptions) error { - qp := url.Values{} - qp.Set("snapshot_id", snap.ID) - qp.Set("plugin_id", snap.PluginID) - if w == nil { - w = &WriteOptions{} - } - w.SetHeadersFromCSISecrets(snap.Secrets) - _, err := v.client.delete("/v1/volumes/snapshot?"+qp.Encode(), nil, nil, w) - return err -} - -// ListSnapshotsOpts lists external storage volume snapshots. 
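// --- Illustrative sketch; not part of the upstream file or of this patch ---
// Create and Deregister above pair an external-provider create with Nomad
// registration. The IDs below are hypothetical placeholders:
func exampleCreateAndDeregisterVolume(client *Client) error {
    vol := &CSIVolume{
        ID:       "example-vol",
        Name:     "example-vol",
        PluginID: "example-plugin",
        RequestedCapabilities: []*CSIVolumeCapability{{
            AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
            AttachmentMode: CSIVolumeAttachmentModeFilesystem,
        }},
    }
    if _, _, err := client.CSIVolumes().Create(vol, nil); err != nil {
        return err
    }
    // Deregister removes the volume from Nomad's state only; the volume keeps
    // existing in the external storage provider.
    return client.CSIVolumes().Deregister(vol.ID, false, nil)
}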
-func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapshotListResponse, *QueryMeta, error) { - var resp *CSISnapshotListResponse - - qp := url.Values{} - if req.PluginID != "" { - qp.Set("plugin_id", req.PluginID) - } - if req.NextToken != "" { - qp.Set("next_token", req.NextToken) - } - if req.PerPage != 0 { - qp.Set("per_page", fmt.Sprint(req.PerPage)) - } - req.QueryOptions.SetHeadersFromCSISecrets(req.Secrets) - - qm, err := v.client.query("/v1/volumes/snapshot?"+qp.Encode(), &resp, &req.QueryOptions) - if err != nil { - return nil, nil, err - } - - sort.Sort(CSISnapshotSort(resp.Snapshots)) - return resp, qm, nil -} - -// DEPRECATED: will be removed in Nomad 1.4.0 -// ListSnapshots lists external storage volume snapshots. -func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) { - var resp *CSISnapshotListResponse - - qp := url.Values{} - if pluginID != "" { - qp.Set("plugin_id", pluginID) - } - if q.NextToken != "" { - qp.Set("next_token", q.NextToken) - } - if q.PerPage != 0 { - qp.Set("per_page", fmt.Sprint(q.PerPage)) - } - - qm, err := v.client.query("/v1/volumes/snapshot?"+qp.Encode(), &resp, q) - if err != nil { - return nil, nil, err - } - - sort.Sort(CSISnapshotSort(resp.Snapshots)) - return resp, qm, nil -} - -// CSIVolumeAttachmentMode chooses the type of storage api that will be used to -// interact with the device. (Duplicated in nomad/structs/csi.go) -type CSIVolumeAttachmentMode string - -const ( - CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" - CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" - CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" -) - -// CSIVolumeAccessMode indicates how a volume should be used in a storage topology -// e.g whether the provider should make the volume available concurrently. (Duplicated in nomad/structs/csi.go) -type CSIVolumeAccessMode string - -const ( - CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" - CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" - CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" - CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" - CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" - CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" -) - -const ( - CSIVolumeTypeHost = "host" - CSIVolumeTypeCSI = "csi" -) - -// CSIMountOptions contain optional additional configuration that can be used -// when specifying that a Volume should be used with VolumeAccessTypeMount. -type CSIMountOptions struct { - // FSType is an optional field that allows an operator to specify the type - // of the filesystem. - FSType string `hcl:"fs_type,optional"` - - // MountFlags contains additional options that may be used when mounting the - // volume by the plugin. This may contain sensitive data and should not be - // leaked. - MountFlags []string `hcl:"mount_flags,optional"` - - ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` // report unexpected keys -} - -func (o *CSIMountOptions) Merge(p *CSIMountOptions) { - if p == nil { - return - } - if p.FSType != "" { - o.FSType = p.FSType - } - if p.MountFlags != nil { - o.MountFlags = p.MountFlags - } -} - -// CSISecrets contain optional additional credentials that may be needed by -// the storage provider. 
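// --- Illustrative sketch; not part of the upstream file or of this patch ---
// Merge above only overwrites FSType and MountFlags when the argument sets
// them, which is what makes it safe for layering volume and task options:
func exampleMergeMountOptions() CSIMountOptions {
    base := CSIMountOptions{FSType: "ext4"}
    override := CSIMountOptions{MountFlags: []string{"noatime"}}
    base.Merge(&override)
    // base now keeps FSType "ext4" and gains MountFlags ["noatime"].
    return base
}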
These values will be redacted when reported in the -// API or in Nomad's logs. -type CSISecrets map[string]string - -func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { - pairs := []string{} - for k, v := range secrets { - pairs = append(pairs, fmt.Sprintf("%v=%v", k, v)) - } - if q.Headers == nil { - q.Headers = map[string]string{} - } - q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") -} - -func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { - pairs := []string{} - for k, v := range secrets { - pairs = append(pairs, fmt.Sprintf("%v=%v", k, v)) - } - if w.Headers == nil { - w.Headers = map[string]string{} - } - w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") -} - -// CSIVolume is used for serialization, see also nomad/structs/csi.go -type CSIVolume struct { - ID string - Name string - ExternalID string `mapstructure:"external_id" hcl:"external_id"` - Namespace string - - // RequestedTopologies are the topologies submitted as options to - // the storage provider at the time the volume was created. After - // volumes are created, this field is ignored. - RequestedTopologies *CSITopologyRequest `hcl:"topology_request"` - - // Topologies are the topologies returned by the storage provider, - // based on the RequestedTopologies and what the storage provider - // could support. This value cannot be set by the user. - Topologies []*CSITopology - - AccessMode CSIVolumeAccessMode `hcl:"access_mode"` - AttachmentMode CSIVolumeAttachmentMode `hcl:"attachment_mode"` - MountOptions *CSIMountOptions `hcl:"mount_options"` - Secrets CSISecrets `mapstructure:"secrets" hcl:"secrets"` - Parameters map[string]string `mapstructure:"parameters" hcl:"parameters"` - Context map[string]string `mapstructure:"context" hcl:"context"` - Capacity int64 `hcl:"-"` - - // These fields are used as part of the volume creation request - RequestedCapacityMin int64 `hcl:"capacity_min"` - RequestedCapacityMax int64 `hcl:"capacity_max"` - RequestedCapabilities []*CSIVolumeCapability `hcl:"capability"` - CloneID string `mapstructure:"clone_id" hcl:"clone_id"` - SnapshotID string `mapstructure:"snapshot_id" hcl:"snapshot_id"` - - // ReadAllocs is a map of allocation IDs for tracking reader claim status. - // The Allocation value will always be nil; clients can populate this data - // by iterating over the Allocations field. - ReadAllocs map[string]*Allocation - - // WriteAllocs is a map of allocation IDs for tracking writer claim - // status. The Allocation value will always be nil; clients can populate - // this data by iterating over the Allocations field. 
- WriteAllocs map[string]*Allocation - - // Allocations is a combined list of readers and writers - Allocations []*AllocationListStub - - // Schedulable is true if all the denormalized plugin health fields are true - Schedulable bool - PluginID string `mapstructure:"plugin_id" hcl:"plugin_id"` - Provider string - ProviderVersion string - ControllerRequired bool - ControllersHealthy int - ControllersExpected int - NodesHealthy int - NodesExpected int - ResourceExhausted time.Time - - CreateIndex uint64 - ModifyIndex uint64 - - // ExtraKeysHCL is used by the hcl parser to report unexpected keys - ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` -} - -// CSIVolumeCapability is a requested attachment and access mode for a -// volume -type CSIVolumeCapability struct { - AccessMode CSIVolumeAccessMode `mapstructure:"access_mode" hcl:"access_mode"` - AttachmentMode CSIVolumeAttachmentMode `mapstructure:"attachment_mode" hcl:"attachment_mode"` -} - -// CSIVolumeIndexSort is a helper used for sorting volume stubs by creation -// time. -type CSIVolumeIndexSort []*CSIVolumeListStub - -func (v CSIVolumeIndexSort) Len() int { - return len(v) -} - -func (v CSIVolumeIndexSort) Less(i, j int) bool { - return v[i].CreateIndex > v[j].CreateIndex -} - -func (v CSIVolumeIndexSort) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} - -// CSIVolumeListStub omits allocations. See also nomad/structs/csi.go -type CSIVolumeListStub struct { - ID string - Namespace string - Name string - ExternalID string - Topologies []*CSITopology - AccessMode CSIVolumeAccessMode - AttachmentMode CSIVolumeAttachmentMode - CurrentReaders int - CurrentWriters int - Schedulable bool - PluginID string - Provider string - ControllerRequired bool - ControllersHealthy int - ControllersExpected int - NodesHealthy int - NodesExpected int - ResourceExhausted time.Time - - CreateIndex uint64 - ModifyIndex uint64 -} - -type CSIVolumeListExternalResponse struct { - Volumes []*CSIVolumeExternalStub - NextToken string -} - -// CSIVolumeExternalStub is the storage provider's view of a volume, as -// returned from the controller plugin; all IDs are for external resources -type CSIVolumeExternalStub struct { - ExternalID string - CapacityBytes int64 - VolumeContext map[string]string - CloneID string - SnapshotID string - PublishedExternalNodeIDs []string - IsAbnormal bool - Status string -} - -// CSIVolumeExternalStubSort is a sorting helper for external volumes. We -// can't sort these by creation time because we don't get that data back from -// the storage provider. Sort by External ID within this page. 
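// --- Illustrative sketch; not part of the upstream file or of this patch ---
// ListExternal (defined earlier in this file) pages through the provider's
// volumes via QueryOptions.PerPage and the NextToken echoed in the response.
// A hedged sketch that drains every page, assuming an existing *Client:
func exampleListAllExternalVolumes(client *Client, pluginID string) ([]*CSIVolumeExternalStub, error) {
    var out []*CSIVolumeExternalStub
    q := &QueryOptions{PerPage: 50}
    for {
        resp, _, err := client.CSIVolumes().ListExternal(pluginID, q)
        if err != nil {
            return nil, err
        }
        out = append(out, resp.Volumes...)
        if resp.NextToken == "" {
            return out, nil
        }
        q.NextToken = resp.NextToken
    }
}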
-type CSIVolumeExternalStubSort []*CSIVolumeExternalStub - -func (v CSIVolumeExternalStubSort) Len() int { - return len(v) -} - -func (v CSIVolumeExternalStubSort) Less(i, j int) bool { - return v[i].ExternalID > v[j].ExternalID -} - -func (v CSIVolumeExternalStubSort) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} - -type CSIVolumeCreateRequest struct { - Volumes []*CSIVolume - WriteRequest -} - -type CSIVolumeCreateResponse struct { - Volumes []*CSIVolume - QueryMeta -} - -type CSIVolumeRegisterRequest struct { - Volumes []*CSIVolume - WriteRequest -} - -type CSIVolumeDeregisterRequest struct { - VolumeIDs []string - WriteRequest -} - -type CSIVolumeDeleteRequest struct { - ExternalVolumeID string - Secrets CSISecrets - WriteRequest -} - -// CSISnapshot is the storage provider's view of a volume snapshot -type CSISnapshot struct { - ID string // storage provider's ID - ExternalSourceVolumeID string // storage provider's ID for volume - SizeBytes int64 // value from storage provider - CreateTime int64 // value from storage provider - IsReady bool // value from storage provider - SourceVolumeID string // Nomad volume ID - PluginID string // CSI plugin ID - - // These field are only used during snapshot creation and will not be - // populated when the snapshot is returned - Name string // suggested name of the snapshot, used for creation - Secrets CSISecrets // secrets needed to create snapshot - Parameters map[string]string // secrets needed to create snapshot -} - -// CSISnapshotSort is a helper used for sorting snapshots by creation time. -type CSISnapshotSort []*CSISnapshot - -func (v CSISnapshotSort) Len() int { - return len(v) -} - -func (v CSISnapshotSort) Less(i, j int) bool { - return v[i].CreateTime > v[j].CreateTime -} - -func (v CSISnapshotSort) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} - -type CSISnapshotCreateRequest struct { - Snapshots []*CSISnapshot - WriteRequest -} - -type CSISnapshotCreateResponse struct { - Snapshots []*CSISnapshot - QueryMeta -} - -// CSISnapshotListRequest is a request to a controller plugin to list all the -// snapshot known to the the storage provider. This request is paginated by -// the plugin and accepts the QueryOptions.PerPage and QueryOptions.NextToken -// fields -type CSISnapshotListRequest struct { - PluginID string - Secrets CSISecrets - QueryOptions -} - -type CSISnapshotListResponse struct { - Snapshots []*CSISnapshot - NextToken string - QueryMeta -} - -// CSI Plugins are jobs with plugin specific data -type CSIPlugins struct { - client *Client -} - -// CSIPlugin is used for serialization, see also nomad/structs/csi.go -type CSIPlugin struct { - ID string - Provider string - Version string - ControllerRequired bool - // Map Node.ID to CSIInfo fingerprint results - Controllers map[string]*CSIInfo - Nodes map[string]*CSIInfo - Allocations []*AllocationListStub - ControllersHealthy int - ControllersExpected int - NodesHealthy int - NodesExpected int - CreateIndex uint64 - ModifyIndex uint64 -} - -type CSIPluginListStub struct { - ID string - Provider string - ControllerRequired bool - ControllersHealthy int - ControllersExpected int - NodesHealthy int - NodesExpected int - CreateIndex uint64 - ModifyIndex uint64 -} - -// CSIPluginIndexSort is a helper used for sorting plugin stubs by creation -// time. 
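// --- Illustrative sketch; not part of the upstream file or of this patch ---
// CSISnapshot above is used both for creation requests and for listings.
// A minimal creation call via CreateSnapshot (defined earlier in this file);
// the name and secret below are hypothetical placeholders:
func exampleCreateSnapshot(client *Client, volID, pluginID string) (*CSISnapshotCreateResponse, error) {
    snap := &CSISnapshot{
        Name:           "nightly-backup",
        SourceVolumeID: volID,
        PluginID:       pluginID,
        Secrets:        CSISecrets{"auth-token": "example"},
    }
    resp, _, err := client.CSIVolumes().CreateSnapshot(snap, nil)
    return resp, err
}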
-type CSIPluginIndexSort []*CSIPluginListStub - -func (v CSIPluginIndexSort) Len() int { - return len(v) -} - -func (v CSIPluginIndexSort) Less(i, j int) bool { - return v[i].CreateIndex > v[j].CreateIndex -} - -func (v CSIPluginIndexSort) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} - -// CSIPlugins returns a handle on the CSIPlugins endpoint -func (c *Client) CSIPlugins() *CSIPlugins { - return &CSIPlugins{client: c} -} - -// List returns all CSI plugins -func (v *CSIPlugins) List(q *QueryOptions) ([]*CSIPluginListStub, *QueryMeta, error) { - var resp []*CSIPluginListStub - qm, err := v.client.query("/v1/plugins?type=csi", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(CSIPluginIndexSort(resp)) - return resp, qm, nil -} - -// Info is used to retrieve a single CSI Plugin Job -func (v *CSIPlugins) Info(id string, q *QueryOptions) (*CSIPlugin, *QueryMeta, error) { - var resp *CSIPlugin - qm, err := v.client.query("/v1/plugin/csi/"+id, &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/deployments.go b/vendor/github.com/hashicorp/nomad/api/deployments.go deleted file mode 100644 index 1c8011a6..00000000 --- a/vendor/github.com/hashicorp/nomad/api/deployments.go +++ /dev/null @@ -1,297 +0,0 @@ -package api - -import ( - "sort" - "time" -) - -// Deployments is used to query the deployments endpoints. -type Deployments struct { - client *Client -} - -// Deployments returns a new handle on the deployments. -func (c *Client) Deployments() *Deployments { - return &Deployments{client: c} -} - -// List is used to dump all of the deployments. -func (d *Deployments) List(q *QueryOptions) ([]*Deployment, *QueryMeta, error) { - var resp []*Deployment - qm, err := d.client.query("/v1/deployments", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(DeploymentIndexSort(resp)) - return resp, qm, nil -} - -func (d *Deployments) PrefixList(prefix string) ([]*Deployment, *QueryMeta, error) { - return d.List(&QueryOptions{Prefix: prefix}) -} - -// Info is used to query a single deployment by its ID. -func (d *Deployments) Info(deploymentID string, q *QueryOptions) (*Deployment, *QueryMeta, error) { - var resp Deployment - qm, err := d.client.query("/v1/deployment/"+deploymentID, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Allocations is used to retrieve a set of allocations that are part of the -// deployment -func (d *Deployments) Allocations(deploymentID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { - var resp []*AllocationListStub - qm, err := d.client.query("/v1/deployment/allocations/"+deploymentID, &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(AllocIndexSort(resp)) - return resp, qm, nil -} - -// Fail is used to fail the given deployment. -func (d *Deployments) Fail(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { - var resp DeploymentUpdateResponse - req := &DeploymentFailRequest{ - DeploymentID: deploymentID, - } - wm, err := d.client.write("/v1/deployment/fail/"+deploymentID, req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Pause is used to pause or unpause the given deployment. 
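// --- Illustrative sketch; not part of the upstream file or of this patch ---
// The Deployments handle above exposes Info and Allocations; fetching both
// gives the deployment record plus the allocations it placed:
func exampleDeploymentWithAllocs(client *Client, deploymentID string) (*Deployment, []*AllocationListStub, error) {
    d := client.Deployments()
    dep, _, err := d.Info(deploymentID, nil)
    if err != nil {
        return nil, nil, err
    }
    allocs, _, err := d.Allocations(deploymentID, nil)
    return dep, allocs, err
}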
-func (d *Deployments) Pause(deploymentID string, pause bool, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { - var resp DeploymentUpdateResponse - req := &DeploymentPauseRequest{ - DeploymentID: deploymentID, - Pause: pause, - } - wm, err := d.client.write("/v1/deployment/pause/"+deploymentID, req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// PromoteAll is used to promote all canaries in the given deployment -func (d *Deployments) PromoteAll(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { - var resp DeploymentUpdateResponse - req := &DeploymentPromoteRequest{ - DeploymentID: deploymentID, - All: true, - } - wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// PromoteGroups is used to promote canaries in the passed groups in the given deployment -func (d *Deployments) PromoteGroups(deploymentID string, groups []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { - var resp DeploymentUpdateResponse - req := &DeploymentPromoteRequest{ - DeploymentID: deploymentID, - Groups: groups, - } - wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Unblock is used to unblock the given deployment. -func (d *Deployments) Unblock(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { - var resp DeploymentUpdateResponse - req := &DeploymentUnblockRequest{ - DeploymentID: deploymentID, - } - wm, err := d.client.write("/v1/deployment/unblock/"+deploymentID, req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// SetAllocHealth is used to set allocation health for allocs that are part of -// the given deployment -func (d *Deployments) SetAllocHealth(deploymentID string, healthy, unhealthy []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { - var resp DeploymentUpdateResponse - req := &DeploymentAllocHealthRequest{ - DeploymentID: deploymentID, - HealthyAllocationIDs: healthy, - UnhealthyAllocationIDs: unhealthy, - } - wm, err := d.client.write("/v1/deployment/allocation-health/"+deploymentID, req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -const ( - DeploymentStatusRunning = "running" - DeploymentStatusPaused = "paused" - DeploymentStatusFailed = "failed" - DeploymentStatusSuccessful = "successful" - DeploymentStatusCancelled = "cancelled" - DeploymentStatusPending = "pending" - DeploymentStatusBlocked = "blocked" - DeploymentStatusUnblocking = "unblocking" -) - -// Deployment is used to serialize an deployment. -type Deployment struct { - // ID is a generated UUID for the deployment - ID string - - // Namespace is the namespace the deployment is created in - Namespace string - - // JobID is the job the deployment is created for - JobID string - - // JobVersion is the version of the job at which the deployment is tracking - JobVersion uint64 - - // JobModifyIndex is the ModifyIndex of the job which the deployment is - // tracking. - JobModifyIndex uint64 - - // JobSpecModifyIndex is the JobModifyIndex of the job which the - // deployment is tracking. - JobSpecModifyIndex uint64 - - // JobCreateIndex is the create index of the job which the deployment is - // tracking. 
It is needed so that if the job gets stopped and reran we can - // present the correct list of deployments for the job and not old ones. - JobCreateIndex uint64 - - // IsMultiregion specifies if this deployment is part of a multi-region deployment - IsMultiregion bool - - // TaskGroups is the set of task groups effected by the deployment and their - // current deployment status. - TaskGroups map[string]*DeploymentState - - // The status of the deployment - Status string - - // StatusDescription allows a human readable description of the deployment - // status. - StatusDescription string - - CreateIndex uint64 - ModifyIndex uint64 -} - -// DeploymentState tracks the state of a deployment for a given task group. -type DeploymentState struct { - PlacedCanaries []string - AutoRevert bool - ProgressDeadline time.Duration - RequireProgressBy time.Time - Promoted bool - DesiredCanaries int - DesiredTotal int - PlacedAllocs int - HealthyAllocs int - UnhealthyAllocs int -} - -// DeploymentIndexSort is a wrapper to sort deployments by CreateIndex. We -// reverse the test so that we get the highest index first. -type DeploymentIndexSort []*Deployment - -func (d DeploymentIndexSort) Len() int { - return len(d) -} - -func (d DeploymentIndexSort) Less(i, j int) bool { - return d[i].CreateIndex > d[j].CreateIndex -} - -func (d DeploymentIndexSort) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} - -// DeploymentUpdateResponse is used to respond to a deployment change. The -// response will include the modify index of the deployment as well as details -// of any triggered evaluation. -type DeploymentUpdateResponse struct { - EvalID string - EvalCreateIndex uint64 - DeploymentModifyIndex uint64 - RevertedJobVersion *uint64 - WriteMeta -} - -// DeploymentAllocHealthRequest is used to set the health of a set of -// allocations as part of a deployment. -type DeploymentAllocHealthRequest struct { - DeploymentID string - - // Marks these allocations as healthy, allow further allocations - // to be rolled. 
- HealthyAllocationIDs []string - - // Any unhealthy allocations fail the deployment - UnhealthyAllocationIDs []string - - WriteRequest -} - -// DeploymentPromoteRequest is used to promote task groups in a deployment -type DeploymentPromoteRequest struct { - DeploymentID string - - // All is to promote all task groups - All bool - - // Groups is used to set the promotion status per task group - Groups []string - - WriteRequest -} - -// DeploymentPauseRequest is used to pause a deployment -type DeploymentPauseRequest struct { - DeploymentID string - - // Pause sets the pause status - Pause bool - - WriteRequest -} - -// DeploymentSpecificRequest is used to make a request specific to a particular -// deployment -type DeploymentSpecificRequest struct { - DeploymentID string - QueryOptions -} - -// DeploymentFailRequest is used to fail a particular deployment -type DeploymentFailRequest struct { - DeploymentID string - WriteRequest -} - -// DeploymentUnblockRequest is used to unblock a particular deployment -type DeploymentUnblockRequest struct { - DeploymentID string - WriteRequest -} - -// SingleDeploymentResponse is used to respond with a single deployment -type SingleDeploymentResponse struct { - Deployment *Deployment - QueryMeta -} diff --git a/vendor/github.com/hashicorp/nomad/api/evaluations.go b/vendor/github.com/hashicorp/nomad/api/evaluations.go deleted file mode 100644 index bcb0eb2b..00000000 --- a/vendor/github.com/hashicorp/nomad/api/evaluations.go +++ /dev/null @@ -1,150 +0,0 @@ -package api - -import ( - "sort" - "time" -) - -// Evaluations is used to query the evaluation endpoints. -type Evaluations struct { - client *Client -} - -// Evaluations returns a new handle on the evaluations. -func (c *Client) Evaluations() *Evaluations { - return &Evaluations{client: c} -} - -// List is used to dump all of the evaluations. -func (e *Evaluations) List(q *QueryOptions) ([]*Evaluation, *QueryMeta, error) { - var resp []*Evaluation - qm, err := e.client.query("/v1/evaluations", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(EvalIndexSort(resp)) - return resp, qm, nil -} - -func (e *Evaluations) PrefixList(prefix string) ([]*Evaluation, *QueryMeta, error) { - return e.List(&QueryOptions{Prefix: prefix}) -} - -// Info is used to query a single evaluation by its ID. -func (e *Evaluations) Info(evalID string, q *QueryOptions) (*Evaluation, *QueryMeta, error) { - var resp Evaluation - qm, err := e.client.query("/v1/evaluation/"+evalID, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Delete is used to batch delete evaluations using their IDs. -func (e *Evaluations) Delete(evalIDs []string, w *WriteOptions) (*WriteMeta, error) { - req := EvalDeleteRequest{ - EvalIDs: evalIDs, - } - wm, err := e.client.delete("/v1/evaluations", &req, nil, w) - if err != nil { - return nil, err - } - return wm, nil -} - -// Allocations is used to retrieve a set of allocations given -// an evaluation ID. -func (e *Evaluations) Allocations(evalID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { - var resp []*AllocationListStub - qm, err := e.client.query("/v1/evaluation/"+evalID+"/allocations", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(AllocIndexSort(resp)) - return resp, qm, nil -} - -const ( - EvalStatusBlocked = "blocked" - EvalStatusPending = "pending" - EvalStatusComplete = "complete" - EvalStatusFailed = "failed" - EvalStatusCancelled = "canceled" -) - -// Evaluation is used to serialize an evaluation. 
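// --- Illustrative sketch; not part of the upstream file or of this patch ---
// The Evaluations handle above lists evaluations newest-first; filtering on
// the EvalStatus* constants is a common follow-up:
func examplePendingEvaluations(client *Client) ([]*Evaluation, error) {
    evals, _, err := client.Evaluations().List(nil)
    if err != nil {
        return nil, err
    }
    var pending []*Evaluation
    for _, eval := range evals {
        if eval.Status == EvalStatusPending {
            pending = append(pending, eval)
        }
    }
    return pending, nil
}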
-type Evaluation struct { - ID string - Priority int - Type string - TriggeredBy string - Namespace string - JobID string - JobModifyIndex uint64 - NodeID string - NodeModifyIndex uint64 - DeploymentID string - Status string - StatusDescription string - Wait time.Duration - WaitUntil time.Time - NextEval string - PreviousEval string - BlockedEval string - RelatedEvals []*EvaluationStub - FailedTGAllocs map[string]*AllocationMetric - ClassEligibility map[string]bool - EscapedComputedClass bool - QuotaLimitReached string - AnnotatePlan bool - QueuedAllocations map[string]int - SnapshotIndex uint64 - CreateIndex uint64 - ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 -} - -// EvaluationStub is used to serialize parts of an evaluation returned in the -// RelatedEvals field of an Evaluation. -type EvaluationStub struct { - ID string - Priority int - Type string - TriggeredBy string - Namespace string - JobID string - NodeID string - DeploymentID string - Status string - StatusDescription string - WaitUntil time.Time - NextEval string - PreviousEval string - BlockedEval string - CreateIndex uint64 - ModifyIndex uint64 - CreateTime int64 - ModifyTime int64 -} - -type EvalDeleteRequest struct { - EvalIDs []string - WriteRequest -} - -// EvalIndexSort is a wrapper to sort evaluations by CreateIndex. -// We reverse the test so that we get the highest index first. -type EvalIndexSort []*Evaluation - -func (e EvalIndexSort) Len() int { - return len(e) -} - -func (e EvalIndexSort) Less(i, j int) bool { - return e[i].CreateIndex > e[j].CreateIndex -} - -func (e EvalIndexSort) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} diff --git a/vendor/github.com/hashicorp/nomad/api/event_stream.go b/vendor/github.com/hashicorp/nomad/api/event_stream.go deleted file mode 100644 index 10825b5c..00000000 --- a/vendor/github.com/hashicorp/nomad/api/event_stream.go +++ /dev/null @@ -1,204 +0,0 @@ -package api - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/mitchellh/mapstructure" -) - -const ( - TopicDeployment Topic = "Deployment" - TopicEvaluation Topic = "Evaluation" - TopicAllocation Topic = "Allocation" - TopicJob Topic = "Job" - TopicNode Topic = "Node" - TopicService Topic = "Service" - TopicAll Topic = "*" -) - -// Events is a set of events for a corresponding index. Events returned for the -// index depend on which topics are subscribed to when a request is made. -type Events struct { - Index uint64 - Events []Event - Err error -} - -// Topic is an event Topic -type Topic string - -// Event holds information related to an event that occurred in Nomad. -// The Payload is a hydrated object related to the Topic -type Event struct { - Topic Topic - Type string - Key string - FilterKeys []string - Index uint64 - Payload map[string]interface{} -} - -// Deployment returns a Deployment struct from a given event payload. If the -// Event Topic is Deployment this will return a valid Deployment -func (e *Event) Deployment() (*Deployment, error) { - out, err := e.decodePayload() - if err != nil { - return nil, err - } - return out.Deployment, nil -} - -// Evaluation returns a Evaluation struct from a given event payload. If the -// Event Topic is Evaluation this will return a valid Evaluation -func (e *Event) Evaluation() (*Evaluation, error) { - out, err := e.decodePayload() - if err != nil { - return nil, err - } - return out.Evaluation, nil -} - -// Allocation returns a Allocation struct from a given event payload. 
If the -// Event Topic is Allocation this will return a valid Allocation. -func (e *Event) Allocation() (*Allocation, error) { - out, err := e.decodePayload() - if err != nil { - return nil, err - } - return out.Allocation, nil -} - -// Job returns a Job struct from a given event payload. If the -// Event Topic is Job this will return a valid Job. -func (e *Event) Job() (*Job, error) { - out, err := e.decodePayload() - if err != nil { - return nil, err - } - return out.Job, nil -} - -// Node returns a Node struct from a given event payload. If the -// Event Topic is Node this will return a valid Node. -func (e *Event) Node() (*Node, error) { - out, err := e.decodePayload() - if err != nil { - return nil, err - } - return out.Node, nil -} - -// Service returns a ServiceRegistration struct from a given event payload. If -// the Event Topic is Service this will return a valid ServiceRegistration. -func (e *Event) Service() (*ServiceRegistration, error) { - out, err := e.decodePayload() - if err != nil { - return nil, err - } - return out.Service, nil -} - -type eventPayload struct { - Allocation *Allocation `mapstructure:"Allocation"` - Deployment *Deployment `mapstructure:"Deployment"` - Evaluation *Evaluation `mapstructure:"Evaluation"` - Job *Job `mapstructure:"Job"` - Node *Node `mapstructure:"Node"` - Service *ServiceRegistration `mapstructure:"Service"` -} - -func (e *Event) decodePayload() (*eventPayload, error) { - var out eventPayload - cfg := &mapstructure.DecoderConfig{ - Result: &out, - DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), - } - - dec, err := mapstructure.NewDecoder(cfg) - if err != nil { - return nil, err - } - - if err := dec.Decode(e.Payload); err != nil { - return nil, err - } - - return &out, nil -} - -// IsHeartbeat specifies if the event is an empty heartbeat used to -// keep a connection alive. -func (e *Events) IsHeartbeat() bool { - return e.Index == 0 && len(e.Events) == 0 -} - -// EventStream is used to stream events from Nomad -type EventStream struct { - client *Client -} - -// EventStream returns a handle to the Events endpoint -func (c *Client) EventStream() *EventStream { - return &EventStream{client: c} -} - -// Stream establishes a new subscription to Nomad's event stream and streams -// results back to the returned channel. 
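// --- Illustrative sketch; not part of the upstream file or of this patch ---
// Stream (defined just below) subscribes to the event broker and delivers
// batches until the context is cancelled. A minimal consumer in the style of
// this package; the "*" filter key is assumed to mean "all keys" for a topic:
func exampleConsumeJobEvents(ctx context.Context, client *Client) error {
    topics := map[Topic][]string{TopicJob: {"*"}}
    eventCh, err := client.EventStream().Stream(ctx, topics, 0, &QueryOptions{})
    if err != nil {
        return err
    }
    for batch := range eventCh {
        if batch.Err != nil {
            return batch.Err
        }
        for _, ev := range batch.Events {
            if job, err := ev.Job(); err == nil && job != nil {
                _ = job // react to the updated job here
            }
        }
    }
    return nil
}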
-func (e *EventStream) Stream(ctx context.Context, topics map[Topic][]string, index uint64, q *QueryOptions) (<-chan *Events, error) { - r, err := e.client.newRequest("GET", "/v1/event/stream") - if err != nil { - return nil, err - } - q = q.WithContext(ctx) - if q.Params == nil { - q.Params = map[string]string{} - } - q.Params["index"] = strconv.FormatUint(index, 10) - r.setQueryOptions(q) - - // Build topic query params - for topic, keys := range topics { - for _, k := range keys { - r.params.Add("topic", fmt.Sprintf("%s:%s", topic, k)) - } - } - - _, resp, err := requireOK(e.client.doRequest(r)) - - if err != nil { - return nil, err - } - - eventsCh := make(chan *Events, 10) - go func() { - defer resp.Body.Close() - defer close(eventsCh) - - dec := json.NewDecoder(resp.Body) - - for ctx.Err() == nil { - // Decode next newline delimited json of events - var events Events - if err := dec.Decode(&events); err != nil { - // set error and fallthrough to - // select eventsCh - events = Events{Err: err} - } - if events.Err == nil && events.IsHeartbeat() { - continue - } - - select { - case <-ctx.Done(): - return - case eventsCh <- &events: - } - } - }() - - return eventsCh, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/fs.go b/vendor/github.com/hashicorp/nomad/api/fs.go deleted file mode 100644 index 8b63a388..00000000 --- a/vendor/github.com/hashicorp/nomad/api/fs.go +++ /dev/null @@ -1,404 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "io" - "net" - "strconv" - "sync" - "time" -) - -const ( - // OriginStart and OriginEnd are the available parameters for the origin - // argument when streaming a file. They respectively offset from the start - // and end of a file. - OriginStart = "start" - OriginEnd = "end" -) - -// AllocFileInfo holds information about a file inside the AllocDir -type AllocFileInfo struct { - Name string - IsDir bool - Size int64 - FileMode string - ModTime time.Time - ContentType string -} - -// StreamFrame is used to frame data of a file when streaming -type StreamFrame struct { - Offset int64 `json:",omitempty"` - Data []byte `json:",omitempty"` - File string `json:",omitempty"` - FileEvent string `json:",omitempty"` -} - -// IsHeartbeat returns if the frame is a heartbeat frame -func (s *StreamFrame) IsHeartbeat() bool { - return len(s.Data) == 0 && s.FileEvent == "" && s.File == "" && s.Offset == 0 -} - -// AllocFS is used to introspect an allocation directory on a Nomad client -type AllocFS struct { - client *Client -} - -// AllocFS returns an handle to the AllocFS endpoints -func (c *Client) AllocFS() *AllocFS { - return &AllocFS{client: c} -} - -// List is used to list the files at a given path of an allocation directory. -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*AllocFileInfo, *QueryMeta, error) { - if q == nil { - q = &QueryOptions{} - } - if q.Params == nil { - q.Params = make(map[string]string) - } - q.Params["path"] = path - - var resp []*AllocFileInfo - qm, err := a.client.query(fmt.Sprintf("/v1/client/fs/ls/%s", alloc.ID), &resp, q) - if err != nil { - return nil, nil, err - } - - return resp, qm, nil -} - -// Stat is used to stat a file at a given path of an allocation directory. 
-// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocFileInfo, *QueryMeta, error) { - if q == nil { - q = &QueryOptions{} - } - if q.Params == nil { - q.Params = make(map[string]string) - } - - q.Params["path"] = path - - var resp AllocFileInfo - qm, err := a.client.query(fmt.Sprintf("/v1/client/fs/stat/%s", alloc.ID), &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// ReadAt is used to read bytes at a given offset until limit at the given path -// in an allocation directory. If limit is <= 0, there is no limit. -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) { - reqPath := fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID) - - return queryClientNode(a.client, alloc, reqPath, q, - func(q *QueryOptions) { - q.Params["path"] = path - q.Params["offset"] = strconv.FormatInt(offset, 10) - q.Params["limit"] = strconv.FormatInt(limit, 10) - }) -} - -// Cat is used to read contents of a file at the given path in an allocation -// directory. -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. -func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) { - reqPath := fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID) - return queryClientNode(a.client, alloc, reqPath, q, - func(q *QueryOptions) { - q.Params["path"] = path - }) -} - -// Stream streams the content of a file blocking on EOF. -// The parameters are: -// * path: path to file to stream. -// * offset: The offset to start streaming data at. -// * origin: Either "start" or "end" and defines from where the offset is applied. -// * cancel: A channel that when closed, streaming will end. -// -// The return value is a channel that will emit StreamFrames as they are read. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. 
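// --- Illustrative sketch; not part of the upstream file or of this patch ---
// Cat (defined above) returns an io.ReadCloser over the file contents; the
// caller owns closing it. Reading a small file in full:
func exampleReadAllocFile(client *Client, alloc *Allocation, path string) ([]byte, error) {
    rc, err := client.AllocFS().Cat(alloc, path, nil)
    if err != nil {
        return nil, err
    }
    defer rc.Close()
    return io.ReadAll(rc)
}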
-func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64, - cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { - - errCh := make(chan error, 1) - - reqPath := fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID) - r, err := queryClientNode(a.client, alloc, reqPath, q, - func(q *QueryOptions) { - q.Params["path"] = path - q.Params["offset"] = strconv.FormatInt(offset, 10) - q.Params["origin"] = origin - }) - if err != nil { - errCh <- err - return nil, errCh - } - - // Create the output channel - frames := make(chan *StreamFrame, 10) - - go func() { - // Close the body - defer r.Close() - - // Create a decoder - dec := json.NewDecoder(r) - - for { - // Check if we have been cancelled - select { - case <-cancel: - return - default: - } - - // Decode the next frame - var frame StreamFrame - if err := dec.Decode(&frame); err != nil { - errCh <- err - close(frames) - return - } - - // Discard heartbeat frames - if frame.IsHeartbeat() { - continue - } - - frames <- &frame - } - }() - - return frames, errCh -} - -func queryClientNode(c *Client, alloc *Allocation, reqPath string, q *QueryOptions, customizeQ func(*QueryOptions)) (io.ReadCloser, error) { - nodeClient, _ := c.GetNodeClientWithTimeout(alloc.NodeID, ClientConnTimeout, q) - - if q == nil { - q = &QueryOptions{} - } - if q.Params == nil { - q.Params = make(map[string]string) - } - if customizeQ != nil { - customizeQ(q) - } - - var r io.ReadCloser - var err error - - if nodeClient != nil { - r, err = nodeClient.rawQuery(reqPath, q) - if _, ok := err.(net.Error); err != nil && !ok { - // found a non networking error talking to client directly - return nil, err - } - - } - - // failed to query node, access through server directly - // or network error when talking to the client directly - if r == nil { - return c.rawQuery(reqPath, q) - } - - return r, err -} - -// Logs streams the content of a tasks logs blocking on EOF. -// The parameters are: -// * allocation: the allocation to stream from. -// * follow: Whether the logs should be followed. -// * task: the tasks name to stream logs for. -// * logType: Either "stdout" or "stderr" -// * origin: Either "start" or "end" and defines from where the offset is applied. -// * offset: The offset to start streaming data at. -// * cancel: A channel that when closed, streaming will end. -// -// The return value is a channel that will emit StreamFrames as they are read. -// The chan will be closed when follow=false and the end of the file is -// reached. -// -// Unexpected (non-EOF) errors will be sent on the error chan. -// -// Note: for cluster topologies where API consumers don't have network access to -// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid -// long pauses on this API call. 
-func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin string, - offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { - - errCh := make(chan error, 1) - - reqPath := fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID) - r, err := queryClientNode(a.client, alloc, reqPath, q, - func(q *QueryOptions) { - q.Params["follow"] = strconv.FormatBool(follow) - q.Params["task"] = task - q.Params["type"] = logType - q.Params["origin"] = origin - q.Params["offset"] = strconv.FormatInt(offset, 10) - }) - if err != nil { - errCh <- err - return nil, errCh - } - - // Create the output channel - frames := make(chan *StreamFrame, 10) - - go func() { - // Close the body - defer r.Close() - - // Create a decoder - dec := json.NewDecoder(r) - - for { - // Check if we have been cancelled - select { - case <-cancel: - close(frames) - return - default: - } - - // Decode the next frame - var frame StreamFrame - if err := dec.Decode(&frame); err != nil { - if err == io.EOF || err == io.ErrClosedPipe { - close(frames) - } else { - errCh <- err - } - return - } - - // Discard heartbeat frames - if frame.IsHeartbeat() { - continue - } - - frames <- &frame - } - }() - - return frames, errCh -} - -// FrameReader is used to convert a stream of frames into a read closer. -type FrameReader struct { - frames <-chan *StreamFrame - errCh <-chan error - cancelCh chan struct{} - - closedLock sync.Mutex - closed bool - - unblockTime time.Duration - - frame *StreamFrame - frameOffset int - - byteOffset int -} - -// NewFrameReader takes a channel of frames and returns a FrameReader which -// implements io.ReadCloser -func NewFrameReader(frames <-chan *StreamFrame, errCh <-chan error, cancelCh chan struct{}) *FrameReader { - return &FrameReader{ - frames: frames, - errCh: errCh, - cancelCh: cancelCh, - } -} - -// SetUnblockTime sets the time to unblock and return zero bytes read. If the -// duration is unset or is zero or less, the read will block until data is read. -func (f *FrameReader) SetUnblockTime(d time.Duration) { - f.unblockTime = d -} - -// Offset returns the offset into the stream. -func (f *FrameReader) Offset() int { - return f.byteOffset -} - -// Read reads the data of the incoming frames into the bytes buffer. Returns EOF -// when there are no more frames. 
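// --- Illustrative sketch; not part of the upstream file or of this patch ---
// Logs and NewFrameReader (both above) combine into a plain io.ReadCloser
// over a task's log stream; closing the reader closes the cancel channel and
// ends the stream. Assumes an already-resolved *Allocation:
func exampleFollowTaskLogs(client *Client, alloc *Allocation, task string) io.ReadCloser {
    cancel := make(chan struct{})
    frames, errCh := client.AllocFS().Logs(alloc, true, task, "stdout", OriginEnd, 0, cancel, nil)
    return NewFrameReader(frames, errCh, cancel)
}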
-func (f *FrameReader) Read(p []byte) (n int, err error) { - f.closedLock.Lock() - closed := f.closed - f.closedLock.Unlock() - if closed { - return 0, io.EOF - } - - if f.frame == nil { - var unblock <-chan time.Time - if f.unblockTime.Nanoseconds() > 0 { - unblock = time.After(f.unblockTime) - } - - select { - case frame, ok := <-f.frames: - if !ok { - return 0, io.EOF - } - f.frame = frame - - // Store the total offset into the file - f.byteOffset = int(f.frame.Offset) - case <-unblock: - return 0, nil - case err := <-f.errCh: - return 0, err - case <-f.cancelCh: - return 0, io.EOF - } - } - - // Copy the data out of the frame and update our offset - n = copy(p, f.frame.Data[f.frameOffset:]) - f.frameOffset += n - - // Clear the frame and its offset once we have read everything - if len(f.frame.Data) == f.frameOffset { - f.frame = nil - f.frameOffset = 0 - } - - return n, nil -} - -// Close cancels the stream of frames -func (f *FrameReader) Close() error { - f.closedLock.Lock() - defer f.closedLock.Unlock() - if f.closed { - return nil - } - - close(f.cancelCh) - f.closed = true - return nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/ioutil.go b/vendor/github.com/hashicorp/nomad/api/ioutil.go deleted file mode 100644 index fe3cce5a..00000000 --- a/vendor/github.com/hashicorp/nomad/api/ioutil.go +++ /dev/null @@ -1,81 +0,0 @@ -package api - -import ( - "crypto/md5" - "crypto/sha256" - "crypto/sha512" - "encoding/base64" - "errors" - "hash" - "io" - "strings" -) - -var errMismatchChecksum = errors.New("mismatch checksum") - -// checksumValidatingReader is a wrapper reader that validates -// the checksum of the underlying reader. -type checksumValidatingReader struct { - r io.ReadCloser - - // algo is the hash algorithm (e.g. `sha-256`) - algo string - - // checksum is the base64 component of checksum - checksum string - - // hash is the hashing function used to compute the checksum - hash hash.Hash -} - -// newChecksumValidatingReader returns a checksum-validating wrapper reader, according -// to a digest received in HTTP header -// -// The digest must be in the format "=" (e.g. "sha-256=gPelGB7..."). -// -// When the reader is fully consumed (i.e. EOT is encountered), if the checksum don't match, -// `Read` returns a checksum mismatch error. 
-func newChecksumValidatingReader(r io.ReadCloser, digest string) (io.ReadCloser, error) { - parts := strings.SplitN(digest, "=", 2) - if len(parts) != 2 { - return nil, errors.New("invalid digest format") - } - - algo := parts[0] - var hash hash.Hash - switch algo { - case "sha-256": - hash = sha256.New() - case "sha-512": - hash = sha512.New() - case "md5": - hash = md5.New() - } - - return &checksumValidatingReader{ - r: r, - algo: algo, - checksum: parts[1], - hash: hash, - }, nil -} - -func (r *checksumValidatingReader) Read(b []byte) (int, error) { - n, err := r.r.Read(b) - if n != 0 { - r.hash.Write(b[:n]) - } - - if err == io.EOF || err == io.ErrClosedPipe { - found := base64.StdEncoding.EncodeToString(r.hash.Sum(nil)) - if found != r.checksum { - return n, errMismatchChecksum - } - } - - return n, err -} - -func (r *checksumValidatingReader) Close() error { - return r.r.Close() -} diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go deleted file mode 100644 index 4fc048dd..00000000 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ /dev/null @@ -1,1392 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "net/url" - "sort" - "strconv" - "time" - - "github.com/hashicorp/cronexpr" -) - -const ( - // JobTypeService indicates a long-running processes - JobTypeService = "service" - - // JobTypeBatch indicates a short-lived process - JobTypeBatch = "batch" - - // JobTypeSystem indicates a system process that should run on all clients - JobTypeSystem = "system" - - // JobTypeSysbatch indicates a short-lived system process that should run - // on all clients. - JobTypeSysbatch = "sysbatch" - - // PeriodicSpecCron is used for a cron spec. - PeriodicSpecCron = "cron" - - // DefaultNamespace is the default namespace. - DefaultNamespace = "default" - - // For Job configuration, GlobalRegion is a sentinel region value - // that users may specify to indicate the job should be run on - // the region of the node that the job was submitted to. - // For Client configuration, if no region information is given, - // the client node will default to be part of the GlobalRegion. - GlobalRegion = "global" -) - -const ( - // RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by - // enforcing the job modify index during registers. - RegisterEnforceIndexErrPrefix = "Enforcing job modify index" -) - -const ( - // JobPeriodicLaunchSuffix is the string appended to the periodic jobs ID - // when launching derived instances of it. - JobPeriodicLaunchSuffix = "/periodic-" - - // JobDispatchLaunchSuffix is the string appended to the parameterized job's ID - // when dispatching instances of it. - JobDispatchLaunchSuffix = "/dispatch-" -) - -// Jobs is used to access the job-specific endpoints. -type Jobs struct { - client *Client -} - -// JobsParseRequest is used for arguments of the /v1/jobs/parse endpoint -type JobsParseRequest struct { - // JobHCL is an hcl jobspec - JobHCL string - - // HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser - HCLv1 bool `json:"hclv1,omitempty"` - - // Canonicalize is a flag as to if the server should return default values - // for unset fields - Canonicalize bool -} - -// Jobs returns a handle on the jobs endpoints. -func (c *Client) Jobs() *Jobs { - return &Jobs{client: c} -} - -// ParseHCL is used to convert the HCL repesentation of a Job to JSON server side. 
-// To parse the HCL client side see package github.com/hashicorp/nomad/jobspec -// Use ParseHCLOpts if you need to customize JobsParseRequest. -func (j *Jobs) ParseHCL(jobHCL string, canonicalize bool) (*Job, error) { - req := &JobsParseRequest{ - JobHCL: jobHCL, - Canonicalize: canonicalize, - } - return j.ParseHCLOpts(req) -} - -// ParseHCLOpts is used to convert the HCL representation of a Job to JSON -// server side. To parse the HCL client side see package -// github.com/hashicorp/nomad/jobspec. -// ParseHCL is an alternative convenience API for HCLv2 users. -func (j *Jobs) ParseHCLOpts(req *JobsParseRequest) (*Job, error) { - var job Job - _, err := j.client.write("/v1/jobs/parse", req, &job, nil) - return &job, err -} - -func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) { - var resp JobValidateResponse - req := &JobValidateRequest{Job: job} - if q != nil { - req.WriteRequest = WriteRequest{Region: q.Region} - } - wm, err := j.client.write("/v1/validate/job", req, &resp, q) - return &resp, wm, err -} - -// RegisterOptions is used to pass through job registration parameters -type RegisterOptions struct { - EnforceIndex bool - ModifyIndex uint64 - PolicyOverride bool - PreserveCounts bool - EvalPriority int -} - -// Register is used to register a new job. It returns the ID -// of the evaluation, along with any errors encountered. -func (j *Jobs) Register(job *Job, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { - return j.RegisterOpts(job, nil, q) -} - -// EnforceRegister is used to register a job enforcing its job modify index. -func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { - opts := RegisterOptions{EnforceIndex: true, ModifyIndex: modifyIndex} - return j.RegisterOpts(job, &opts, q) -} - -// RegisterOpts is used to register a new job with the passed RegisterOpts. It -// returns the ID of the evaluation, along with any errors encountered. -func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { - // Format the request - req := &JobRegisterRequest{ - Job: job, - } - if opts != nil { - if opts.EnforceIndex { - req.EnforceIndex = true - req.JobModifyIndex = opts.ModifyIndex - } - req.PolicyOverride = opts.PolicyOverride - req.PreserveCounts = opts.PreserveCounts - req.EvalPriority = opts.EvalPriority - } - - var resp JobRegisterResponse - wm, err := j.client.write("/v1/jobs", req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// List is used to list all of the existing jobs. -func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) { - var resp []*JobListStub - qm, err := j.client.query("/v1/jobs", &resp, q) - if err != nil { - return nil, qm, err - } - sort.Sort(JobIDSort(resp)) - return resp, qm, nil -} - -// PrefixList is used to list all existing jobs that match the prefix. -func (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) { - return j.List(&QueryOptions{Prefix: prefix}) -} - -// Info is used to retrieve information about a particular -// job given its unique ID. -func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) { - var resp Job - qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID), &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Scale is used to retrieve information about a particular -// job given its unique ID. 
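// --- Illustrative sketch; not part of the upstream file or of this patch ---
// ParseHCL and Register above cover the usual submit path: parse the jobspec
// server-side, then register the resulting *Job:
func exampleParseAndRegister(client *Client, jobHCL string) (*JobRegisterResponse, error) {
    job, err := client.Jobs().ParseHCL(jobHCL, true)
    if err != nil {
        return nil, err
    }
    resp, _, err := client.Jobs().Register(job, nil)
    return resp, err
}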
-func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool, meta map[string]interface{}, - q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { - - var count64 *int64 - if count != nil { - count64 = pointerOf(int64(*count)) - } - req := &ScalingRequest{ - Count: count64, - Target: map[string]string{ - "Job": jobID, - "Group": group, - }, - Error: error, - Message: message, - Meta: meta, - } - var resp JobRegisterResponse - qm, err := j.client.write(fmt.Sprintf("/v1/job/%s/scale", url.PathEscape(jobID)), req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// ScaleStatus is used to retrieve information about a particular -// job given its unique ID. -func (j *Jobs) ScaleStatus(jobID string, q *QueryOptions) (*JobScaleStatusResponse, *QueryMeta, error) { - var resp JobScaleStatusResponse - qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/scale", url.PathEscape(jobID)), &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Versions is used to retrieve all versions of a particular job given its -// unique ID. -func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) { - var resp JobVersionsResponse - qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", url.PathEscape(jobID), diffs), &resp, q) - if err != nil { - return nil, nil, nil, err - } - return resp.Versions, resp.Diffs, qm, nil -} - -// Allocations is used to return the allocs for a given job ID. -func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { - var resp []*AllocationListStub - u, err := url.Parse("/v1/job/" + url.PathEscape(jobID) + "/allocations") - if err != nil { - return nil, nil, err - } - - v := u.Query() - v.Add("all", strconv.FormatBool(allAllocs)) - u.RawQuery = v.Encode() - - qm, err := j.client.query(u.String(), &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(AllocIndexSort(resp)) - return resp, qm, nil -} - -// Deployments is used to query the deployments associated with the given job -// ID. -func (j *Jobs) Deployments(jobID string, all bool, q *QueryOptions) ([]*Deployment, *QueryMeta, error) { - var resp []*Deployment - u, err := url.Parse("/v1/job/" + url.PathEscape(jobID) + "/deployments") - if err != nil { - return nil, nil, err - } - - v := u.Query() - v.Add("all", strconv.FormatBool(all)) - u.RawQuery = v.Encode() - qm, err := j.client.query(u.String(), &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(DeploymentIndexSort(resp)) - return resp, qm, nil -} - -// LatestDeployment is used to query for the latest deployment associated with -// the given job ID. -func (j *Jobs) LatestDeployment(jobID string, q *QueryOptions) (*Deployment, *QueryMeta, error) { - var resp *Deployment - qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID)+"/deployment", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// Evaluations is used to query the evaluations associated with the given job -// ID. -func (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) { - var resp []*Evaluation - qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID)+"/evaluations", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(EvalIndexSort(resp)) - return resp, qm, nil -} - -// Deregister is used to remove an existing job. 
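// --- Illustrative sketch; not part of the upstream file or of this patch ---
// Scale (defined above) targets a job and task group; the message and meta
// are stored with the resulting scaling event. The message is hypothetical:
func exampleScaleGroup(client *Client, jobID, group string, count int) (string, error) {
    resp, _, err := client.Jobs().Scale(jobID, group, &count, "manual scale-up", false, nil, nil)
    if err != nil {
        return "", err
    }
    return resp.EvalID, nil
}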
If purge is set to true, the job -// is deregistered and purged from the system versus still being queryable and -// eventually GC'ed from the system. Most callers should not specify purge. -func (j *Jobs) Deregister(jobID string, purge bool, q *WriteOptions) (string, *WriteMeta, error) { - var resp JobDeregisterResponse - wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t", url.PathEscape(jobID), purge), nil, &resp, q) - if err != nil { - return "", nil, err - } - return resp.EvalID, wm, nil -} - -// DeregisterOptions is used to pass through job deregistration parameters -type DeregisterOptions struct { - // If Purge is set to true, the job is deregistered and purged from the - // system versus still being queryable and eventually GC'ed from the - // system. Most callers should not specify purge. - Purge bool - - // If Global is set to true, all regions of a multiregion job will be - // stopped. - Global bool - - // EvalPriority is an optional priority to use on any evaluation created as - // a result on this job deregistration. This value must be between 1-100 - // inclusively, where a larger value corresponds to a higher priority. This - // is useful when an operator wishes to push through a job deregistration - // in busy clusters with a large evaluation backlog. - EvalPriority int - - // NoShutdownDelay, if set to true, will override the group and - // task shutdown_delay configuration and ignore the delay for any - // allocations stopped as a result of this Deregister call. - NoShutdownDelay bool -} - -// DeregisterOpts is used to remove an existing job. See DeregisterOptions -// for parameters. -func (j *Jobs) DeregisterOpts(jobID string, opts *DeregisterOptions, q *WriteOptions) (string, *WriteMeta, error) { - var resp JobDeregisterResponse - - // The base endpoint to add query params to. - endpoint := "/v1/job/" + url.PathEscape(jobID) - - // Protect against nil opts. url.Values expects a string, and so using - // fmt.Sprintf is the best way to do this. - if opts != nil { - endpoint += fmt.Sprintf("?purge=%t&global=%t&eval_priority=%v&no_shutdown_delay=%t", - opts.Purge, opts.Global, opts.EvalPriority, opts.NoShutdownDelay) - } - - wm, err := j.client.delete(endpoint, nil, &resp, q) - if err != nil { - return "", nil, err - } - return resp.EvalID, wm, nil -} - -// ForceEvaluate is used to force-evaluate an existing job. 
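A short usage sketch for DeregisterOpts, assuming the same client setup as in the earlier example; the job ID and evaluation priority are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Stop "example" without purging it, bumping the deregistration
	// evaluation's priority so it is processed ahead of a backlog.
	evalID, _, err := client.Jobs().DeregisterOpts("example", &api.DeregisterOptions{
		Purge:        false,
		EvalPriority: 90,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deregistration eval:", evalID)
}
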
-func (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) { - var resp JobRegisterResponse - wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/evaluate", nil, &resp, q) - if err != nil { - return "", nil, err - } - return resp.EvalID, wm, nil -} - -// EvaluateWithOpts is used to force-evaluate an existing job and takes additional options -// for whether to force reschedule failed allocations -func (j *Jobs) EvaluateWithOpts(jobID string, opts EvalOptions, q *WriteOptions) (string, *WriteMeta, error) { - req := &JobEvaluateRequest{ - JobID: jobID, - EvalOptions: opts, - } - - var resp JobRegisterResponse - wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/evaluate", req, &resp, q) - if err != nil { - return "", nil, err - } - return resp.EvalID, wm, nil -} - -// PeriodicForce spawns a new instance of the periodic job and returns the eval ID -func (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) { - var resp periodicForceResponse - wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/periodic/force", nil, &resp, q) - if err != nil { - return "", nil, err - } - return resp.EvalID, wm, nil -} - -// PlanOptions is used to pass through job planning parameters -type PlanOptions struct { - Diff bool - PolicyOverride bool -} - -func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { - opts := PlanOptions{Diff: diff} - return j.PlanOpts(job, &opts, q) -} - -func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { - if job == nil { - return nil, nil, errors.New("must pass non-nil job") - } - - // Setup the request - req := &JobPlanRequest{ - Job: job, - } - if opts != nil { - req.Diff = opts.Diff - req.PolicyOverride = opts.PolicyOverride - } - - var resp JobPlanResponse - wm, err := j.client.write("/v1/job/"+url.PathEscape(*job.ID)+"/plan", req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) { - var resp JobSummary - qm, err := j.client.query("/v1/job/"+url.PathEscape(jobID)+"/summary", &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -func (j *Jobs) Dispatch(jobID string, meta map[string]string, - payload []byte, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) { - var resp JobDispatchResponse - req := &JobDispatchRequest{ - JobID: jobID, - Meta: meta, - Payload: payload, - } - wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/dispatch", req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Revert is used to revert the given job to the passed version. If -// enforceVersion is set, the job is only reverted if the current version is at -// the passed version. -func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64, - q *WriteOptions, consulToken, vaultToken string) (*JobRegisterResponse, *WriteMeta, error) { - - var resp JobRegisterResponse - req := &JobRevertRequest{ - JobID: jobID, - JobVersion: version, - EnforcePriorVersion: enforcePriorVersion, - ConsulToken: consulToken, - VaultToken: vaultToken, - } - wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/revert", req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Stable is used to mark a job version's stability. 
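A sketch of a dry-run plan using PlanOpts, assuming an *api.Job already in hand (here fetched with Jobs().Info for a hypothetical "example" job); the response fields follow the JobPlanResponse type defined later in this file.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

// planJob runs PlanOpts with a diff: nothing is scheduled, but the response
// reports what would change and which task groups could not be placed.
func planJob(client *api.Client, job *api.Job) error {
	resp, _, err := client.Jobs().PlanOpts(job, &api.PlanOptions{Diff: true}, nil)
	if err != nil {
		return err
	}
	if resp.Diff != nil {
		fmt.Println("diff type:", resp.Diff.Type)
	}
	for tg, metric := range resp.FailedTGAllocs {
		fmt.Printf("group %q could not be placed: %+v\n", tg, metric)
	}
	if resp.Warnings != "" {
		fmt.Println("warnings:", resp.Warnings)
	}
	return nil
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Plan against the currently registered version of a hypothetical "example" job.
	job, _, err := client.Jobs().Info("example", nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := planJob(client, job); err != nil {
		log.Fatal(err)
	}
}
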
-func (j *Jobs) Stable(jobID string, version uint64, stable bool, - q *WriteOptions) (*JobStabilityResponse, *WriteMeta, error) { - - var resp JobStabilityResponse - req := &JobStabilityRequest{ - JobID: jobID, - JobVersion: version, - Stable: stable, - } - wm, err := j.client.write("/v1/job/"+url.PathEscape(jobID)+"/stable", req, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Services is used to return a list of service registrations associated to the -// specified jobID. -func (j *Jobs) Services(jobID string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) { - var resp []*ServiceRegistration - qm, err := j.client.query("/v1/job/"+jobID+"/services", &resp, q) - return resp, qm, err -} - -// periodicForceResponse is used to deserialize a force response -type periodicForceResponse struct { - EvalID string -} - -// UpdateStrategy defines a task groups update strategy. -type UpdateStrategy struct { - Stagger *time.Duration `mapstructure:"stagger" hcl:"stagger,optional"` - MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"` - HealthCheck *string `mapstructure:"health_check" hcl:"health_check,optional"` - MinHealthyTime *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"` - HealthyDeadline *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"` - ProgressDeadline *time.Duration `mapstructure:"progress_deadline" hcl:"progress_deadline,optional"` - Canary *int `mapstructure:"canary" hcl:"canary,optional"` - AutoRevert *bool `mapstructure:"auto_revert" hcl:"auto_revert,optional"` - AutoPromote *bool `mapstructure:"auto_promote" hcl:"auto_promote,optional"` -} - -// DefaultUpdateStrategy provides a baseline that can be used to upgrade -// jobs with the old policy or for populating field defaults. 
-func DefaultUpdateStrategy() *UpdateStrategy { - return &UpdateStrategy{ - Stagger: pointerOf(30 * time.Second), - MaxParallel: pointerOf(1), - HealthCheck: pointerOf("checks"), - MinHealthyTime: pointerOf(10 * time.Second), - HealthyDeadline: pointerOf(5 * time.Minute), - ProgressDeadline: pointerOf(10 * time.Minute), - AutoRevert: pointerOf(false), - Canary: pointerOf(0), - AutoPromote: pointerOf(false), - } -} - -func (u *UpdateStrategy) Copy() *UpdateStrategy { - if u == nil { - return nil - } - - copy := new(UpdateStrategy) - - if u.Stagger != nil { - copy.Stagger = pointerOf(*u.Stagger) - } - - if u.MaxParallel != nil { - copy.MaxParallel = pointerOf(*u.MaxParallel) - } - - if u.HealthCheck != nil { - copy.HealthCheck = pointerOf(*u.HealthCheck) - } - - if u.MinHealthyTime != nil { - copy.MinHealthyTime = pointerOf(*u.MinHealthyTime) - } - - if u.HealthyDeadline != nil { - copy.HealthyDeadline = pointerOf(*u.HealthyDeadline) - } - - if u.ProgressDeadline != nil { - copy.ProgressDeadline = pointerOf(*u.ProgressDeadline) - } - - if u.AutoRevert != nil { - copy.AutoRevert = pointerOf(*u.AutoRevert) - } - - if u.Canary != nil { - copy.Canary = pointerOf(*u.Canary) - } - - if u.AutoPromote != nil { - copy.AutoPromote = pointerOf(*u.AutoPromote) - } - - return copy -} - -func (u *UpdateStrategy) Merge(o *UpdateStrategy) { - if o == nil { - return - } - - if o.Stagger != nil { - u.Stagger = pointerOf(*o.Stagger) - } - - if o.MaxParallel != nil { - u.MaxParallel = pointerOf(*o.MaxParallel) - } - - if o.HealthCheck != nil { - u.HealthCheck = pointerOf(*o.HealthCheck) - } - - if o.MinHealthyTime != nil { - u.MinHealthyTime = pointerOf(*o.MinHealthyTime) - } - - if o.HealthyDeadline != nil { - u.HealthyDeadline = pointerOf(*o.HealthyDeadline) - } - - if o.ProgressDeadline != nil { - u.ProgressDeadline = pointerOf(*o.ProgressDeadline) - } - - if o.AutoRevert != nil { - u.AutoRevert = pointerOf(*o.AutoRevert) - } - - if o.Canary != nil { - u.Canary = pointerOf(*o.Canary) - } - - if o.AutoPromote != nil { - u.AutoPromote = pointerOf(*o.AutoPromote) - } -} - -func (u *UpdateStrategy) Canonicalize() { - d := DefaultUpdateStrategy() - - if u.MaxParallel == nil { - u.MaxParallel = d.MaxParallel - } - - if u.Stagger == nil { - u.Stagger = d.Stagger - } - - if u.HealthCheck == nil { - u.HealthCheck = d.HealthCheck - } - - if u.HealthyDeadline == nil { - u.HealthyDeadline = d.HealthyDeadline - } - - if u.ProgressDeadline == nil { - u.ProgressDeadline = d.ProgressDeadline - } - - if u.MinHealthyTime == nil { - u.MinHealthyTime = d.MinHealthyTime - } - - if u.AutoRevert == nil { - u.AutoRevert = d.AutoRevert - } - - if u.Canary == nil { - u.Canary = d.Canary - } - - if u.AutoPromote == nil { - u.AutoPromote = d.AutoPromote - } -} - -// Empty returns whether the UpdateStrategy is empty or has user defined values. 
-func (u *UpdateStrategy) Empty() bool { - if u == nil { - return true - } - - if u.Stagger != nil && *u.Stagger != 0 { - return false - } - - if u.MaxParallel != nil && *u.MaxParallel != 0 { - return false - } - - if u.HealthCheck != nil && *u.HealthCheck != "" { - return false - } - - if u.MinHealthyTime != nil && *u.MinHealthyTime != 0 { - return false - } - - if u.HealthyDeadline != nil && *u.HealthyDeadline != 0 { - return false - } - - if u.ProgressDeadline != nil && *u.ProgressDeadline != 0 { - return false - } - - if u.AutoRevert != nil && *u.AutoRevert { - return false - } - - if u.AutoPromote != nil && *u.AutoPromote { - return false - } - - if u.Canary != nil && *u.Canary != 0 { - return false - } - - return true -} - -type Multiregion struct { - Strategy *MultiregionStrategy `hcl:"strategy,block"` - Regions []*MultiregionRegion `hcl:"region,block"` -} - -func (m *Multiregion) Canonicalize() { - if m.Strategy == nil { - m.Strategy = &MultiregionStrategy{ - MaxParallel: pointerOf(0), - OnFailure: pointerOf(""), - } - } else { - if m.Strategy.MaxParallel == nil { - m.Strategy.MaxParallel = pointerOf(0) - } - if m.Strategy.OnFailure == nil { - m.Strategy.OnFailure = pointerOf("") - } - } - if m.Regions == nil { - m.Regions = []*MultiregionRegion{} - } - for _, region := range m.Regions { - if region.Count == nil { - region.Count = pointerOf(1) - } - if region.Datacenters == nil { - region.Datacenters = []string{} - } - if region.Meta == nil { - region.Meta = map[string]string{} - } - } -} - -func (m *Multiregion) Copy() *Multiregion { - if m == nil { - return nil - } - copy := new(Multiregion) - if m.Strategy != nil { - copy.Strategy = new(MultiregionStrategy) - copy.Strategy.MaxParallel = pointerOf(*m.Strategy.MaxParallel) - copy.Strategy.OnFailure = pointerOf(*m.Strategy.OnFailure) - } - for _, region := range m.Regions { - copyRegion := new(MultiregionRegion) - copyRegion.Name = region.Name - copyRegion.Count = pointerOf(*region.Count) - copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...) - for k, v := range region.Meta { - copyRegion.Meta[k] = v - } - copy.Regions = append(copy.Regions, copyRegion) - } - return copy -} - -type MultiregionStrategy struct { - MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"` - OnFailure *string `mapstructure:"on_failure" hcl:"on_failure,optional"` -} - -type MultiregionRegion struct { - Name string `hcl:",label"` - Count *int `hcl:"count,optional"` - Datacenters []string `hcl:"datacenters,optional"` - Meta map[string]string `hcl:"meta,block"` -} - -// PeriodicConfig is for serializing periodic config for a job. -type PeriodicConfig struct { - Enabled *bool `hcl:"enabled,optional"` - Spec *string `hcl:"cron,optional"` - SpecType *string - ProhibitOverlap *bool `mapstructure:"prohibit_overlap" hcl:"prohibit_overlap,optional"` - TimeZone *string `mapstructure:"time_zone" hcl:"time_zone,optional"` -} - -func (p *PeriodicConfig) Canonicalize() { - if p.Enabled == nil { - p.Enabled = pointerOf(true) - } - if p.Spec == nil { - p.Spec = pointerOf("") - } - if p.SpecType == nil { - p.SpecType = pointerOf(PeriodicSpecCron) - } - if p.ProhibitOverlap == nil { - p.ProhibitOverlap = pointerOf(false) - } - if p.TimeZone == nil || *p.TimeZone == "" { - p.TimeZone = pointerOf("UTC") - } -} - -// Next returns the closest time instant matching the spec that is after the -// passed time. If no matching instance exists, the zero value of time.Time is -// returned. 
The `time.Location` of the returned value matches that of the -// passed time. -func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) { - if *p.SpecType == PeriodicSpecCron { - e, err := cronexpr.Parse(*p.Spec) - if err != nil { - return time.Time{}, fmt.Errorf("failed parsing cron expression %q: %v", *p.Spec, err) - } - return cronParseNext(e, fromTime, *p.Spec) - } - - return time.Time{}, nil -} - -// cronParseNext is a helper that parses the next time for the given expression -// but captures any panic that may occur in the underlying library. -// --- THIS FUNCTION IS REPLICATED IN nomad/structs/structs.go -// and should be kept in sync. -func cronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) { - defer func() { - if recover() != nil { - t = time.Time{} - err = fmt.Errorf("failed parsing cron expression: %q", spec) - } - }() - - return e.Next(fromTime), nil -} - -func (p *PeriodicConfig) GetLocation() (*time.Location, error) { - if p.TimeZone == nil || *p.TimeZone == "" { - return time.UTC, nil - } - - return time.LoadLocation(*p.TimeZone) -} - -// ParameterizedJobConfig is used to configure the parameterized job. -type ParameterizedJobConfig struct { - Payload string `hcl:"payload,optional"` - MetaRequired []string `mapstructure:"meta_required" hcl:"meta_required,optional"` - MetaOptional []string `mapstructure:"meta_optional" hcl:"meta_optional,optional"` -} - -// Job is used to serialize a job. -type Job struct { - /* Fields parsed from HCL config */ - - Region *string `hcl:"region,optional"` - Namespace *string `hcl:"namespace,optional"` - ID *string `hcl:"id,optional"` - Name *string `hcl:"name,optional"` - Type *string `hcl:"type,optional"` - Priority *int `hcl:"priority,optional"` - AllAtOnce *bool `mapstructure:"all_at_once" hcl:"all_at_once,optional"` - Datacenters []string `hcl:"datacenters,optional"` - Constraints []*Constraint `hcl:"constraint,block"` - Affinities []*Affinity `hcl:"affinity,block"` - TaskGroups []*TaskGroup `hcl:"group,block"` - Update *UpdateStrategy `hcl:"update,block"` - Multiregion *Multiregion `hcl:"multiregion,block"` - Spreads []*Spread `hcl:"spread,block"` - Periodic *PeriodicConfig `hcl:"periodic,block"` - ParameterizedJob *ParameterizedJobConfig `hcl:"parameterized,block"` - Reschedule *ReschedulePolicy `hcl:"reschedule,block"` - Migrate *MigrateStrategy `hcl:"migrate,block"` - Meta map[string]string `hcl:"meta,block"` - ConsulToken *string `mapstructure:"consul_token" hcl:"consul_token,optional"` - VaultToken *string `mapstructure:"vault_token" hcl:"vault_token,optional"` - - /* Fields set by server, not sourced from job config file */ - - Stop *bool - ParentID *string - Dispatched bool - DispatchIdempotencyToken *string - Payload []byte - ConsulNamespace *string `mapstructure:"consul_namespace"` - VaultNamespace *string `mapstructure:"vault_namespace"` - NomadTokenID *string `mapstructure:"nomad_token_id"` - Status *string - StatusDescription *string - Stable *bool - Version *uint64 - SubmitTime *int64 - CreateIndex *uint64 - ModifyIndex *uint64 - JobModifyIndex *uint64 -} - -// IsPeriodic returns whether a job is periodic. -func (j *Job) IsPeriodic() bool { - return j.Periodic != nil -} - -// IsParameterized returns whether a job is parameterized job. 
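A sketch of using IsPeriodic, GetLocation, and Next on a job fetched from the API; the job ID is a placeholder, and the SpecType nil check mirrors the dereference inside Next.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	job, _, err := client.Jobs().Info("example-periodic", nil)
	if err != nil {
		log.Fatal(err)
	}
	if !job.IsPeriodic() || job.Periodic.SpecType == nil {
		log.Fatal("job has no periodic spec")
	}

	// Evaluate the cron spec in the job's configured time zone.
	loc, err := job.Periodic.GetLocation()
	if err != nil {
		log.Fatal(err)
	}
	next, err := job.Periodic.Next(time.Now().In(loc))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("next launch:", next)
}
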
-func (j *Job) IsParameterized() bool { - return j.ParameterizedJob != nil && !j.Dispatched -} - -// IsMultiregion returns whether a job is a multiregion job -func (j *Job) IsMultiregion() bool { - return j.Multiregion != nil && j.Multiregion.Regions != nil && len(j.Multiregion.Regions) > 0 -} - -func (j *Job) Canonicalize() { - if j.ID == nil { - j.ID = pointerOf("") - } - if j.Name == nil { - j.Name = pointerOf(*j.ID) - } - if j.ParentID == nil { - j.ParentID = pointerOf("") - } - if j.Namespace == nil { - j.Namespace = pointerOf(DefaultNamespace) - } - if j.Priority == nil { - j.Priority = pointerOf(50) - } - if j.Stop == nil { - j.Stop = pointerOf(false) - } - if j.Region == nil { - j.Region = pointerOf(GlobalRegion) - } - if j.Namespace == nil { - j.Namespace = pointerOf("default") - } - if j.Type == nil { - j.Type = pointerOf("service") - } - if j.AllAtOnce == nil { - j.AllAtOnce = pointerOf(false) - } - if j.ConsulToken == nil { - j.ConsulToken = pointerOf("") - } - if j.ConsulNamespace == nil { - j.ConsulNamespace = pointerOf("") - } - if j.VaultToken == nil { - j.VaultToken = pointerOf("") - } - if j.VaultNamespace == nil { - j.VaultNamespace = pointerOf("") - } - if j.NomadTokenID == nil { - j.NomadTokenID = pointerOf("") - } - if j.Status == nil { - j.Status = pointerOf("") - } - if j.StatusDescription == nil { - j.StatusDescription = pointerOf("") - } - if j.Stable == nil { - j.Stable = pointerOf(false) - } - if j.Version == nil { - j.Version = pointerOf(uint64(0)) - } - if j.CreateIndex == nil { - j.CreateIndex = pointerOf(uint64(0)) - } - if j.ModifyIndex == nil { - j.ModifyIndex = pointerOf(uint64(0)) - } - if j.JobModifyIndex == nil { - j.JobModifyIndex = pointerOf(uint64(0)) - } - if j.Periodic != nil { - j.Periodic.Canonicalize() - } - if j.Update != nil { - j.Update.Canonicalize() - } else if *j.Type == JobTypeService { - j.Update = DefaultUpdateStrategy() - } - if j.Multiregion != nil { - j.Multiregion.Canonicalize() - } - - for _, tg := range j.TaskGroups { - tg.Canonicalize(j) - } - - for _, spread := range j.Spreads { - spread.Canonicalize() - } - for _, a := range j.Affinities { - a.Canonicalize() - } -} - -// LookupTaskGroup finds a task group by name -func (j *Job) LookupTaskGroup(name string) *TaskGroup { - for _, tg := range j.TaskGroups { - if *tg.Name == name { - return tg - } - } - return nil -} - -// JobSummary summarizes the state of the allocations of a job -type JobSummary struct { - JobID string - Namespace string - Summary map[string]TaskGroupSummary - Children *JobChildrenSummary - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -// JobChildrenSummary contains the summary of children job status -type JobChildrenSummary struct { - Pending int64 - Running int64 - Dead int64 -} - -func (jc *JobChildrenSummary) Sum() int { - if jc == nil { - return 0 - } - - return int(jc.Pending + jc.Running + jc.Dead) -} - -// TaskGroup summarizes the state of all the allocations of a particular -// TaskGroup -type TaskGroupSummary struct { - Queued int - Complete int - Failed int - Running int - Starting int - Lost int - Unknown int -} - -// JobListStub is used to return a subset of information about -// jobs during list operations. 
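A sketch of reading a job's allocation summary with Jobs().Summary (defined earlier in this file) and the JobSummary and TaskGroupSummary types above; the job ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	sum, _, err := client.Jobs().Summary("example", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Summary maps task group name to per-state allocation counts.
	for group, s := range sum.Summary {
		fmt.Printf("%s: running=%d failed=%d queued=%d\n", group, s.Running, s.Failed, s.Queued)
	}
	if sum.Children != nil {
		fmt.Println("child jobs:", sum.Children.Sum())
	}
}
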
-type JobListStub struct { - ID string - ParentID string - Name string - Namespace string `json:",omitempty"` - Datacenters []string - Type string - Priority int - Periodic bool - ParameterizedJob bool - Stop bool - Status string - StatusDescription string - JobSummary *JobSummary - CreateIndex uint64 - ModifyIndex uint64 - JobModifyIndex uint64 - SubmitTime int64 -} - -// JobIDSort is used to sort jobs by their job ID's. -type JobIDSort []*JobListStub - -func (j JobIDSort) Len() int { - return len(j) -} - -func (j JobIDSort) Less(a, b int) bool { - return j[a].ID < j[b].ID -} - -func (j JobIDSort) Swap(a, b int) { - j[a], j[b] = j[b], j[a] -} - -// NewServiceJob creates and returns a new service-style job -// for long-lived processes using the provided name, ID, and -// relative job priority. -func NewServiceJob(id, name, region string, pri int) *Job { - return newJob(id, name, region, JobTypeService, pri) -} - -// NewBatchJob creates and returns a new batch-style job for -// short-lived processes using the provided name and ID along -// with the relative job priority. -func NewBatchJob(id, name, region string, pri int) *Job { - return newJob(id, name, region, JobTypeBatch, pri) -} - -// NewSystemJob creates and returns a new system-style job for processes -// designed to run on all clients, using the provided name and ID along with -// the relative job priority. -func NewSystemJob(id, name, region string, pri int) *Job { - return newJob(id, name, region, JobTypeSystem, pri) -} - -// NewSysbatchJob creates and returns a new sysbatch-style job for short-lived -// processes designed to run on all clients, using the provided name and ID -// along with the relative job priority. -func NewSysbatchJob(id, name, region string, pri int) *Job { - return newJob(id, name, region, JobTypeSysbatch, pri) -} - -// newJob is used to create a new Job struct. -func newJob(id, name, region, typ string, pri int) *Job { - return &Job{ - Region: ®ion, - ID: &id, - Name: &name, - Type: &typ, - Priority: &pri, - } -} - -// SetMeta is used to set arbitrary k/v pairs of metadata on a job. -func (j *Job) SetMeta(key, val string) *Job { - if j.Meta == nil { - j.Meta = make(map[string]string) - } - j.Meta[key] = val - return j -} - -// AddDatacenter is used to add a datacenter to a job. -func (j *Job) AddDatacenter(dc string) *Job { - j.Datacenters = append(j.Datacenters, dc) - return j -} - -// Constrain is used to add a constraint to a job. -func (j *Job) Constrain(c *Constraint) *Job { - j.Constraints = append(j.Constraints, c) - return j -} - -// AddAffinity is used to add an affinity to a job. -func (j *Job) AddAffinity(a *Affinity) *Job { - j.Affinities = append(j.Affinities, a) - return j -} - -// AddTaskGroup adds a task group to an existing job. -func (j *Job) AddTaskGroup(grp *TaskGroup) *Job { - j.TaskGroups = append(j.TaskGroups, grp) - return j -} - -// AddPeriodicConfig adds a periodic config to an existing job. 
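A sketch of the fluent job-construction helpers above (NewServiceJob, AddDatacenter, SetMeta, AddTaskGroup). NewTaskGroup, NewTask, and SetConfig are assumed to come from elsewhere in this package, and the driver and image values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Build a service job in code instead of parsing HCL.
	tg := api.NewTaskGroup("web", 2) // helper assumed from elsewhere in this package
	tg.AddTask(api.NewTask("web", "docker").SetConfig("image", "nginx:1.25"))

	job := api.NewServiceJob("example", "example", "global", 50).
		AddDatacenter("dc1").
		SetMeta("owner", "platform-team").
		AddTaskGroup(tg)

	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	resp, _, err := client.Jobs().Register(job, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("eval:", resp.EvalID)
}
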
-func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job { - j.Periodic = cfg - return j -} - -func (j *Job) AddSpread(s *Spread) *Job { - j.Spreads = append(j.Spreads, s) - return j -} - -type WriteRequest struct { - // The target region for this write - Region string - - // Namespace is the target namespace for this write - Namespace string - - // SecretID is the secret ID of an ACL token - SecretID string -} - -// JobValidateRequest is used to validate a job -type JobValidateRequest struct { - Job *Job - WriteRequest -} - -// JobValidateResponse is the response from validate request -type JobValidateResponse struct { - // DriverConfigValidated indicates whether the agent validated the driver - // config - DriverConfigValidated bool - - // ValidationErrors is a list of validation errors - ValidationErrors []string - - // Error is a string version of any error that may have occurred - Error string - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. - Warnings string -} - -// JobRevertRequest is used to revert a job to a prior version. -type JobRevertRequest struct { - // JobID is the ID of the job being reverted - JobID string - - // JobVersion the version to revert to. - JobVersion uint64 - - // EnforcePriorVersion if set will enforce that the job is at the given - // version before reverting. - EnforcePriorVersion *uint64 - - // ConsulToken is the Consul token that proves the submitter of the job revert - // has access to the Service Identity policies associated with the job's - // Consul Connect enabled services. This field is only used to transfer the - // token and is not stored after the Job revert. - ConsulToken string `json:",omitempty"` - - // VaultToken is the Vault token that proves the submitter of the job revert - // has access to any Vault policies specified in the targeted job version. This - // field is only used to authorize the revert and is not stored after the Job - // revert. - VaultToken string `json:",omitempty"` - - WriteRequest -} - -// JobRegisterRequest is used to update a job -type JobRegisterRequest struct { - Job *Job - // If EnforceIndex is set then the job will only be registered if the passed - // JobModifyIndex matches the current Jobs index. If the index is zero, the - // register only occurs if the job is new. - EnforceIndex bool `json:",omitempty"` - JobModifyIndex uint64 `json:",omitempty"` - PolicyOverride bool `json:",omitempty"` - PreserveCounts bool `json:",omitempty"` - - // EvalPriority is an optional priority to use on any evaluation created as - // a result on this job registration. This value must be between 1-100 - // inclusively, where a larger value corresponds to a higher priority. This - // is useful when an operator wishes to push through a job registration in - // busy clusters with a large evaluation backlog. This avoids needing to - // change the job priority which also impacts preemption. - EvalPriority int `json:",omitempty"` - - WriteRequest -} - -// JobRegisterResponse is used to respond to a job registration -type JobRegisterResponse struct { - EvalID string - EvalCreateIndex uint64 - JobModifyIndex uint64 - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. 
- Warnings string - - QueryMeta -} - -// JobDeregisterResponse is used to respond to a job deregistration -type JobDeregisterResponse struct { - EvalID string - EvalCreateIndex uint64 - JobModifyIndex uint64 - QueryMeta -} - -type JobPlanRequest struct { - Job *Job - Diff bool - PolicyOverride bool - WriteRequest -} - -type JobPlanResponse struct { - JobModifyIndex uint64 - CreatedEvals []*Evaluation - Diff *JobDiff - Annotations *PlanAnnotations - FailedTGAllocs map[string]*AllocationMetric - NextPeriodicLaunch time.Time - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. - Warnings string -} - -type JobDiff struct { - Type string - ID string - Fields []*FieldDiff - Objects []*ObjectDiff - TaskGroups []*TaskGroupDiff -} - -type TaskGroupDiff struct { - Type string - Name string - Fields []*FieldDiff - Objects []*ObjectDiff - Tasks []*TaskDiff - Updates map[string]uint64 -} - -type TaskDiff struct { - Type string - Name string - Fields []*FieldDiff - Objects []*ObjectDiff - Annotations []string -} - -type FieldDiff struct { - Type string - Name string - Old, New string - Annotations []string -} - -type ObjectDiff struct { - Type string - Name string - Fields []*FieldDiff - Objects []*ObjectDiff -} - -type PlanAnnotations struct { - DesiredTGUpdates map[string]*DesiredUpdates - PreemptedAllocs []*AllocationListStub -} - -type DesiredUpdates struct { - Ignore uint64 - Place uint64 - Migrate uint64 - Stop uint64 - InPlaceUpdate uint64 - DestructiveUpdate uint64 - Canary uint64 - Preemptions uint64 -} - -type JobDispatchRequest struct { - JobID string - Payload []byte - Meta map[string]string -} - -type JobDispatchResponse struct { - DispatchedJobID string - EvalID string - EvalCreateIndex uint64 - JobCreateIndex uint64 - WriteMeta -} - -// JobVersionsResponse is used for a job get versions request -type JobVersionsResponse struct { - Versions []*Job - Diffs []*JobDiff - QueryMeta -} - -// JobStabilityRequest is used to marked a job as stable. -type JobStabilityRequest struct { - // Job to set the stability on - JobID string - JobVersion uint64 - - // Set the stability - Stable bool - WriteRequest -} - -// JobStabilityResponse is the response when marking a job as stable. -type JobStabilityResponse struct { - JobModifyIndex uint64 - WriteMeta -} - -// JobEvaluateRequest is used when we just need to re-evaluate a target job -type JobEvaluateRequest struct { - JobID string - EvalOptions EvalOptions - WriteRequest -} - -// EvalOptions is used to encapsulate options when forcing a job evaluation -type EvalOptions struct { - ForceReschedule bool -} diff --git a/vendor/github.com/hashicorp/nomad/api/keyring.go b/vendor/github.com/hashicorp/nomad/api/keyring.go deleted file mode 100644 index 3bb004f2..00000000 --- a/vendor/github.com/hashicorp/nomad/api/keyring.go +++ /dev/null @@ -1,88 +0,0 @@ -package api - -import ( - "fmt" - "net/url" -) - -// Keyring is used to access the Variables keyring. -type Keyring struct { - client *Client -} - -// Keyring returns a handle to the Keyring endpoint -func (c *Client) Keyring() *Keyring { - return &Keyring{client: c} -} - -// EncryptionAlgorithm chooses which algorithm is used for -// encrypting / decrypting entries with this key -type EncryptionAlgorithm string - -const ( - EncryptionAlgorithmAES256GCM EncryptionAlgorithm = "aes256-gcm" -) - -// RootKeyMeta is the metadata used to refer to a RootKey. 
-type RootKeyMeta struct { - KeyID string // UUID - Algorithm EncryptionAlgorithm - CreateTime int64 - CreateIndex uint64 - ModifyIndex uint64 - State RootKeyState -} - -// RootKeyState enum describes the lifecycle of a root key. -type RootKeyState string - -const ( - RootKeyStateInactive RootKeyState = "inactive" - RootKeyStateActive = "active" - RootKeyStateRekeying = "rekeying" - RootKeyStateDeprecated = "deprecated" -) - -// List lists all the keyring metadata -func (k *Keyring) List(q *QueryOptions) ([]*RootKeyMeta, *QueryMeta, error) { - var resp []*RootKeyMeta - qm, err := k.client.query("/v1/operator/keyring/keys", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// Delete deletes a specific inactive key from the keyring -func (k *Keyring) Delete(opts *KeyringDeleteOptions, w *WriteOptions) (*WriteMeta, error) { - wm, err := k.client.delete(fmt.Sprintf("/v1/operator/keyring/key/%v", - url.PathEscape(opts.KeyID)), nil, nil, w) - return wm, err -} - -// KeyringDeleteOptions are parameters for the Delete API -type KeyringDeleteOptions struct { - KeyID string // UUID -} - -// Rotate requests a key rotation -func (k *Keyring) Rotate(opts *KeyringRotateOptions, w *WriteOptions) (*RootKeyMeta, *WriteMeta, error) { - qp := url.Values{} - if opts != nil { - if opts.Algorithm != "" { - qp.Set("algo", string(opts.Algorithm)) - } - if opts.Full { - qp.Set("full", "true") - } - } - resp := &struct{ Key *RootKeyMeta }{} - wm, err := k.client.write("/v1/operator/keyring/rotate?"+qp.Encode(), nil, resp, w) - return resp.Key, wm, err -} - -// KeyringRotateOptions are parameters for the Rotate API -type KeyringRotateOptions struct { - Full bool - Algorithm EncryptionAlgorithm -} diff --git a/vendor/github.com/hashicorp/nomad/api/namespace.go b/vendor/github.com/hashicorp/nomad/api/namespace.go deleted file mode 100644 index 3a21e224..00000000 --- a/vendor/github.com/hashicorp/nomad/api/namespace.go +++ /dev/null @@ -1,98 +0,0 @@ -package api - -import ( - "fmt" - "sort" -) - -// Namespaces is used to query the namespace endpoints. -type Namespaces struct { - client *Client -} - -// Namespaces returns a new handle on the namespaces. -func (c *Client) Namespaces() *Namespaces { - return &Namespaces{client: c} -} - -// List is used to dump all of the namespaces. -func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) { - var resp []*Namespace - qm, err := n.client.query("/v1/namespaces", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(NamespaceIndexSort(resp)) - return resp, qm, nil -} - -// PrefixList is used to do a PrefixList search over namespaces -func (n *Namespaces) PrefixList(prefix string, q *QueryOptions) ([]*Namespace, *QueryMeta, error) { - if q == nil { - q = &QueryOptions{Prefix: prefix} - } else { - q.Prefix = prefix - } - - return n.List(q) -} - -// Info is used to query a single namespace by its name. -func (n *Namespaces) Info(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) { - var resp Namespace - qm, err := n.client.query("/v1/namespace/"+name, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Register is used to register a namespace. 
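A sketch of the keyring endpoints above: rotate the root encryption key, then list keyring metadata. It assumes the client is configured with an ACL token that has operator permissions.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kr := client.Keyring()

	// Rotate to a new root key (not a "full" re-encryption rotation).
	newKey, _, err := kr.Rotate(&api.KeyringRotateOptions{Full: false}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new active key:", newKey.KeyID)

	// List all root key metadata, including the now-inactive previous keys.
	keys, _, err := kr.List(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Printf("%s  %-10s  %s\n", k.KeyID, k.State, k.Algorithm)
	}
}
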
-func (n *Namespaces) Register(namespace *Namespace, q *WriteOptions) (*WriteMeta, error) { - wm, err := n.client.write("/v1/namespace", namespace, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Delete is used to delete a namespace -func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, error) { - wm, err := n.client.delete(fmt.Sprintf("/v1/namespace/%s", namespace), nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Namespace is used to serialize a namespace. -type Namespace struct { - Name string - Description string - Quota string - Capabilities *NamespaceCapabilities `hcl:"capabilities,block"` - Meta map[string]string - CreateIndex uint64 - ModifyIndex uint64 -} - -type NamespaceCapabilities struct { - EnabledTaskDrivers []string `hcl:"enabled_task_drivers"` - DisabledTaskDrivers []string `hcl:"disabled_task_drivers"` -} - -// NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We -// reverse the test so that we get the highest index first. -type NamespaceIndexSort []*Namespace - -func (n NamespaceIndexSort) Len() int { - return len(n) -} - -func (n NamespaceIndexSort) Less(i, j int) bool { - return n[i].CreateIndex > n[j].CreateIndex -} - -func (n NamespaceIndexSort) Swap(i, j int) { - n[i], n[j] = n[j], n[i] -} diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go deleted file mode 100644 index e91a3894..00000000 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ /dev/null @@ -1,959 +0,0 @@ -package api - -import ( - "context" - "fmt" - "sort" - "strconv" - "time" -) - -const ( - NodeStatusInit = "initializing" - NodeStatusReady = "ready" - NodeStatusDown = "down" - - // NodeSchedulingEligible and Ineligible marks the node as eligible or not, - // respectively, for receiving allocations. This is orthogonal to the node - // status being ready. - NodeSchedulingEligible = "eligible" - NodeSchedulingIneligible = "ineligible" - - DrainStatusDraining DrainStatus = "draining" - DrainStatusComplete DrainStatus = "complete" - DrainStatusCanceled DrainStatus = "canceled" -) - -// Nodes is used to query node-related API endpoints -type Nodes struct { - client *Client -} - -// Nodes returns a handle on the node endpoints. -func (c *Client) Nodes() *Nodes { - return &Nodes{client: c} -} - -// List is used to list out all of the nodes -func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { - var resp NodeIndexSort - qm, err := n.client.query("/v1/nodes", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(resp) - return resp, qm, nil -} - -func (n *Nodes) PrefixList(prefix string) ([]*NodeListStub, *QueryMeta, error) { - return n.List(&QueryOptions{Prefix: prefix}) -} - -func (n *Nodes) PrefixListOpts(prefix string, opts *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { - if opts == nil { - opts = &QueryOptions{Prefix: prefix} - } else { - opts.Prefix = prefix - } - return n.List(opts) -} - -// Info is used to query a specific node by its ID. -func (n *Nodes) Info(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error) { - var resp Node - qm, err := n.client.query("/v1/node/"+nodeID, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// NodeUpdateDrainRequest is used to update the drain specification for a node. -type NodeUpdateDrainRequest struct { - // NodeID is the node to update the drain specification for. 
- NodeID string - - // DrainSpec is the drain specification to set for the node. A nil DrainSpec - // will disable draining. - DrainSpec *DrainSpec - - // MarkEligible marks the node as eligible for scheduling if removing - // the drain strategy. - MarkEligible bool - - // Meta allows operators to specify metadata related to the drain operation - Meta map[string]string -} - -// NodeDrainUpdateResponse is used to respond to a node drain update -type NodeDrainUpdateResponse struct { - NodeModifyIndex uint64 - EvalIDs []string - EvalCreateIndex uint64 - WriteMeta -} - -// DrainOptions is used to pass through node drain parameters -type DrainOptions struct { - // DrainSpec contains the drain specification for the node. If non-nil, - // the node will be marked ineligible and begin/continue draining according - // to the provided drain spec. - // If nil, any existing drain operation will be canceled. - DrainSpec *DrainSpec - - // MarkEligible indicates whether the node should be marked as eligible when - // canceling a drain operation. - MarkEligible bool - - // Meta is metadata that is persisted in Node.LastDrain about this - // drain update. - Meta map[string]string -} - -// UpdateDrain is used to update the drain strategy for a given node. If -// markEligible is true and the drain is being removed, the node will be marked -// as having its scheduling being eligible -func (n *Nodes) UpdateDrain(nodeID string, spec *DrainSpec, markEligible bool, q *WriteOptions) (*NodeDrainUpdateResponse, error) { - resp, err := n.UpdateDrainOpts(nodeID, &DrainOptions{ - DrainSpec: spec, - MarkEligible: markEligible, - Meta: nil, - }, q) - return resp, err -} - -// UpdateDrainWithMeta is used to update the drain strategy for a given node. If -// markEligible is true and the drain is being removed, the node will be marked -// as having its scheduling being eligible -func (n *Nodes) UpdateDrainOpts(nodeID string, opts *DrainOptions, q *WriteOptions) (*NodeDrainUpdateResponse, - error) { - req := &NodeUpdateDrainRequest{ - NodeID: nodeID, - DrainSpec: opts.DrainSpec, - MarkEligible: opts.MarkEligible, - Meta: opts.Meta, - } - - var resp NodeDrainUpdateResponse - wm, err := n.client.write("/v1/node/"+nodeID+"/drain", req, &resp, q) - if err != nil { - return nil, err - } - resp.WriteMeta = *wm - return &resp, nil -} - -// MonitorMsgLevels represents the severity log level of a MonitorMessage. -type MonitorMsgLevel int - -const ( - MonitorMsgLevelNormal MonitorMsgLevel = 0 - MonitorMsgLevelInfo MonitorMsgLevel = 1 - MonitorMsgLevelWarn MonitorMsgLevel = 2 - MonitorMsgLevelError MonitorMsgLevel = 3 -) - -// MonitorMessage contains a message and log level. -type MonitorMessage struct { - Level MonitorMsgLevel - Message string -} - -// Messagef formats a new MonitorMessage. -func Messagef(lvl MonitorMsgLevel, msg string, args ...interface{}) *MonitorMessage { - return &MonitorMessage{ - Level: lvl, - Message: fmt.Sprintf(msg, args...), - } -} - -func (m *MonitorMessage) String() string { - return m.Message -} - -// MonitorDrain emits drain related events on the returned string channel. The -// channel will be closed when all allocations on the draining node have -// stopped, when an error occurs, or if the context is canceled. -func (n *Nodes) MonitorDrain(ctx context.Context, nodeID string, index uint64, ignoreSys bool) <-chan *MonitorMessage { - outCh := make(chan *MonitorMessage, 8) - nodeCh := make(chan *MonitorMessage, 1) - allocCh := make(chan *MonitorMessage, 8) - - // Multiplex node and alloc chans onto outCh. 
This goroutine closes - // outCh when other chans have been closed. - multiplexCtx, cancel := context.WithCancel(ctx) - go n.monitorDrainMultiplex(multiplexCtx, cancel, outCh, nodeCh, allocCh) - - // Monitor node for updates - go n.monitorDrainNode(multiplexCtx, nodeID, index, nodeCh) - - // Monitor allocs on node for updates - go n.monitorDrainAllocs(multiplexCtx, nodeID, ignoreSys, allocCh) - - return outCh -} - -// monitorDrainMultiplex multiplexes node and alloc updates onto the out chan. -// Closes out chan when either the context is canceled, both update chans are -// closed, or an error occurs. -func (n *Nodes) monitorDrainMultiplex(ctx context.Context, cancel func(), - outCh chan<- *MonitorMessage, nodeCh, allocCh <-chan *MonitorMessage) { - - defer cancel() - defer close(outCh) - - nodeOk := true - allocOk := true - var msg *MonitorMessage - for { - // If both chans have been closed, close the output chan - if !nodeOk && !allocOk { - return - } - - select { - case msg, nodeOk = <-nodeCh: - if !nodeOk { - // nil chan to prevent further recvs - nodeCh = nil - continue - } - - case msg, allocOk = <-allocCh: - if !allocOk { - // nil chan to prevent further recvs - allocCh = nil - continue - } - - case <-ctx.Done(): - return - } - - if msg == nil { - continue - } - - select { - case outCh <- msg: - case <-ctx.Done(): - return - } - - // Abort on error messages - if msg.Level == MonitorMsgLevelError { - return - } - } -} - -// monitorDrainNode emits node updates on nodeCh and closes the channel when -// the node has finished draining. -func (n *Nodes) monitorDrainNode(ctx context.Context, nodeID string, - index uint64, nodeCh chan<- *MonitorMessage) { - - defer close(nodeCh) - - var lastStrategy *DrainStrategy - q := QueryOptions{ - AllowStale: true, - WaitIndex: index, - } - for { - node, meta, err := n.Info(nodeID, &q) - if err != nil { - msg := Messagef(MonitorMsgLevelError, "Error monitoring node: %v", err) - select { - case nodeCh <- msg: - case <-ctx.Done(): - } - return - } - - if node.DrainStrategy == nil { - msg := Messagef(MonitorMsgLevelInfo, "Drain complete for node %s", nodeID) - select { - case nodeCh <- msg: - case <-ctx.Done(): - } - return - } - - if node.Status == NodeStatusDown { - msg := Messagef(MonitorMsgLevelWarn, "Node %q down", nodeID) - select { - case nodeCh <- msg: - case <-ctx.Done(): - } - } - - // DrainStrategy changed - if lastStrategy != nil && !node.DrainStrategy.Equal(lastStrategy) { - msg := Messagef(MonitorMsgLevelInfo, "Node %q drain updated: %s", nodeID, node.DrainStrategy) - select { - case nodeCh <- msg: - case <-ctx.Done(): - return - } - } - - lastStrategy = node.DrainStrategy - - // Drain still ongoing, update index and block for updates - q.WaitIndex = meta.LastIndex - } -} - -// monitorDrainAllocs emits alloc updates on allocCh and closes the channel -// when the node has finished draining. 
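A sketch tying UpdateDrainOpts and MonitorDrain together: start a drain with a deadline, then stream progress messages until the returned channel closes. The node ID and deadline are placeholders, and DrainSpec is defined later in this file.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	nodes := client.Nodes()
	nodeID := "REPLACE-WITH-NODE-ID" // placeholder

	// Begin draining: allocations are told to stop within the deadline.
	resp, err := nodes.UpdateDrainOpts(nodeID, &api.DrainOptions{
		DrainSpec:    &api.DrainSpec{Deadline: 15 * time.Minute},
		MarkEligible: false,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Stream drain progress; the channel closes on completion, error, or cancel.
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
	defer cancel()
	for msg := range nodes.MonitorDrain(ctx, nodeID, resp.NodeModifyIndex, false) {
		fmt.Println(msg)
		if msg.Level == api.MonitorMsgLevelError {
			log.Fatal("drain failed")
		}
	}
}
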
-func (n *Nodes) monitorDrainAllocs(ctx context.Context, nodeID string, ignoreSys bool, allocCh chan<- *MonitorMessage) { - defer close(allocCh) - - q := QueryOptions{AllowStale: true} - initial := make(map[string]*Allocation, 4) - - for { - allocs, meta, err := n.Allocations(nodeID, &q) - if err != nil { - msg := Messagef(MonitorMsgLevelError, "Error monitoring allocations: %v", err) - select { - case allocCh <- msg: - case <-ctx.Done(): - } - return - } - - q.WaitIndex = meta.LastIndex - - runningAllocs := 0 - for _, a := range allocs { - // Get previous version of alloc - orig, existing := initial[a.ID] - - // Update local alloc state - initial[a.ID] = a - - migrating := a.DesiredTransition.ShouldMigrate() - - var msg string - switch { - case !existing: - // Should only be possible if response - // from initial Allocations call was - // stale. No need to output - - case orig.ClientStatus != a.ClientStatus: - // Alloc status has changed; output - msg = fmt.Sprintf("status %s -> %s", orig.ClientStatus, a.ClientStatus) - - case migrating && !orig.DesiredTransition.ShouldMigrate(): - // Alloc was marked for migration - msg = "marked for migration" - - case migrating && (orig.DesiredStatus != a.DesiredStatus) && a.DesiredStatus == AllocDesiredStatusStop: - // Alloc has already been marked for migration and is now being stopped - msg = "draining" - } - - if msg != "" { - select { - case allocCh <- Messagef(MonitorMsgLevelNormal, "Alloc %q %s", a.ID, msg): - case <-ctx.Done(): - return - } - } - - // Ignore malformed allocs - if a.Job == nil || a.Job.Type == nil { - continue - } - - // Track how many allocs are still running - if ignoreSys && a.Job.Type != nil && *a.Job.Type == JobTypeSystem { - continue - } - - switch a.ClientStatus { - case AllocClientStatusPending, AllocClientStatusRunning: - runningAllocs++ - } - } - - // Exit if all allocs are terminal - if runningAllocs == 0 { - msg := Messagef(MonitorMsgLevelInfo, "All allocations on node %q have stopped", nodeID) - select { - case allocCh <- msg: - case <-ctx.Done(): - } - return - } - } -} - -// NodeUpdateEligibilityRequest is used to update the drain specification for a node. -type NodeUpdateEligibilityRequest struct { - // NodeID is the node to update the drain specification for. - NodeID string - Eligibility string -} - -// NodeEligibilityUpdateResponse is used to respond to a node eligibility update -type NodeEligibilityUpdateResponse struct { - NodeModifyIndex uint64 - EvalIDs []string - EvalCreateIndex uint64 - WriteMeta -} - -// ToggleEligibility is used to update the scheduling eligibility of the node -func (n *Nodes) ToggleEligibility(nodeID string, eligible bool, q *WriteOptions) (*NodeEligibilityUpdateResponse, error) { - e := NodeSchedulingEligible - if !eligible { - e = NodeSchedulingIneligible - } - - req := &NodeUpdateEligibilityRequest{ - NodeID: nodeID, - Eligibility: e, - } - - var resp NodeEligibilityUpdateResponse - wm, err := n.client.write("/v1/node/"+nodeID+"/eligibility", req, &resp, q) - if err != nil { - return nil, err - } - resp.WriteMeta = *wm - return &resp, nil -} - -// Allocations is used to return the allocations associated with a node. 
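A short sketch of ToggleEligibility, the lighter-weight alternative to a drain when a node should simply stop receiving new placements; the node ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mark the node ineligible for scheduling without touching running allocations.
	resp, err := client.Nodes().ToggleEligibility("REPLACE-WITH-NODE-ID", false, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("evals created:", resp.EvalIDs)
}
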
-func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *QueryMeta, error) { - var resp []*Allocation - qm, err := n.client.query("/v1/node/"+nodeID+"/allocations", &resp, q) - if err != nil { - return nil, nil, err - } - sort.Sort(AllocationSort(resp)) - return resp, qm, nil -} - -func (n *Nodes) CSIVolumes(nodeID string, q *QueryOptions) ([]*CSIVolumeListStub, error) { - var resp []*CSIVolumeListStub - path := fmt.Sprintf("/v1/volumes?type=csi&node_id=%s", nodeID) - if _, err := n.client.query(path, &resp, q); err != nil { - return nil, err - } - - return resp, nil -} - -// ForceEvaluate is used to force-evaluate an existing node. -func (n *Nodes) ForceEvaluate(nodeID string, q *WriteOptions) (string, *WriteMeta, error) { - var resp nodeEvalResponse - wm, err := n.client.write("/v1/node/"+nodeID+"/evaluate", nil, &resp, q) - if err != nil { - return "", nil, err - } - return resp.EvalID, wm, nil -} - -func (n *Nodes) Stats(nodeID string, q *QueryOptions) (*HostStats, error) { - var resp HostStats - path := fmt.Sprintf("/v1/client/stats?node_id=%s", nodeID) - if _, err := n.client.query(path, &resp, q); err != nil { - return nil, err - } - - return &resp, nil -} - -func (n *Nodes) GC(nodeID string, q *QueryOptions) error { - path := fmt.Sprintf("/v1/client/gc?node_id=%s", nodeID) - _, err := n.client.query(path, nil, q) - return err -} - -// TODO Add tests -func (n *Nodes) GcAlloc(allocID string, q *QueryOptions) error { - path := fmt.Sprintf("/v1/client/allocation/%s/gc", allocID) - _, err := n.client.query(path, nil, q) - return err -} - -// Purge removes a node from the system. Nodes can still re-join the cluster if -// they are alive. -func (n *Nodes) Purge(nodeID string, q *QueryOptions) (*NodePurgeResponse, *QueryMeta, error) { - var resp NodePurgeResponse - path := fmt.Sprintf("/v1/node/%s/purge", nodeID) - qm, err := n.client.putQuery(path, nil, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// NodePurgeResponse is used to deserialize a Purge response. -type NodePurgeResponse struct { - EvalIDs []string - EvalCreateIndex uint64 - NodeModifyIndex uint64 -} - -// DriverInfo is used to deserialize a DriverInfo entry -type DriverInfo struct { - Attributes map[string]string - Detected bool - Healthy bool - HealthDescription string - UpdateTime time.Time -} - -// HostVolumeInfo is used to return metadata about a given HostVolume. -type HostVolumeInfo struct { - Path string - ReadOnly bool -} - -//HostNetworkInfo is used to return metadata about a given HostNetwork -type HostNetworkInfo struct { - Name string - CIDR string - Interface string - ReservedPorts string -} - -type DrainStatus string - -// DrainMetadata contains information about the most recent drain operation for a given Node. -type DrainMetadata struct { - StartedAt time.Time - UpdatedAt time.Time - Status DrainStatus - AccessorID string - Meta map[string]string -} - -// Node is used to deserialize a node entry. 
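A sketch combining the node read endpoints above: list nodes, then fetch host stats and allocations for each ready node. Field names follow the HostStats and NodeListStub types defined later in this file.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	nodes := client.Nodes()

	stubs, _, err := nodes.List(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, stub := range stubs {
		if stub.Status != api.NodeStatusReady {
			continue
		}
		stats, err := nodes.Stats(stub.ID, nil)
		if err != nil {
			log.Printf("stats for %s: %v", stub.Name, err)
			continue
		}
		allocs, _, err := nodes.Allocations(stub.ID, nil)
		if err != nil {
			log.Printf("allocs for %s: %v", stub.Name, err)
			continue
		}
		var usedMiB uint64
		if stats.Memory != nil {
			usedMiB = stats.Memory.Used / 1024 / 1024
		}
		fmt.Printf("%s: %d allocs, %d MiB memory used\n", stub.Name, len(allocs), usedMiB)
	}
}
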
-type Node struct { - ID string - Datacenter string - Name string - HTTPAddr string - TLSEnabled bool - Attributes map[string]string - Resources *Resources - Reserved *Resources - NodeResources *NodeResources - ReservedResources *NodeReservedResources - Links map[string]string - Meta map[string]string - NodeClass string - CgroupParent string - Drain bool - DrainStrategy *DrainStrategy - SchedulingEligibility string - Status string - StatusDescription string - StatusUpdatedAt int64 - Events []*NodeEvent - Drivers map[string]*DriverInfo - HostVolumes map[string]*HostVolumeInfo - HostNetworks map[string]*HostNetworkInfo - CSIControllerPlugins map[string]*CSIInfo - CSINodePlugins map[string]*CSIInfo - LastDrain *DrainMetadata - CreateIndex uint64 - ModifyIndex uint64 -} - -type NodeResources struct { - Cpu NodeCpuResources - Memory NodeMemoryResources - Disk NodeDiskResources - Networks []*NetworkResource - Devices []*NodeDeviceResource - - MinDynamicPort int - MaxDynamicPort int -} - -type NodeCpuResources struct { - CpuShares int64 - TotalCpuCores uint16 - ReservableCpuCores []uint16 -} - -type NodeMemoryResources struct { - MemoryMB int64 -} - -type NodeDiskResources struct { - DiskMB int64 -} - -type NodeReservedResources struct { - Cpu NodeReservedCpuResources - Memory NodeReservedMemoryResources - Disk NodeReservedDiskResources - Networks NodeReservedNetworkResources -} - -type NodeReservedCpuResources struct { - CpuShares uint64 -} - -type NodeReservedMemoryResources struct { - MemoryMB uint64 -} - -type NodeReservedDiskResources struct { - DiskMB uint64 -} - -type NodeReservedNetworkResources struct { - ReservedHostPorts string -} - -type CSITopologyRequest struct { - Required []*CSITopology `hcl:"required"` - Preferred []*CSITopology `hcl:"preferred"` -} - -type CSITopology struct { - Segments map[string]string `hcl:"segments"` -} - -// CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to -// the Node API. -type CSINodeInfo struct { - ID string - MaxVolumes int64 - AccessibleTopology *CSITopology - - // RequiresNodeStageVolume indicates whether the client should Stage/Unstage - // volumes on this node. - RequiresNodeStageVolume bool - - // SupportsStats indicates plugin support for GET_VOLUME_STATS - SupportsStats bool - - // SupportsExpand indicates plugin support for EXPAND_VOLUME - SupportsExpand bool - - // SupportsCondition indicates plugin support for VOLUME_CONDITION - SupportsCondition bool -} - -// CSIControllerInfo is the fingerprinted data from a CSI Plugin that is specific to -// the Controller API. -type CSIControllerInfo struct { - // SupportsCreateDelete indicates plugin support for CREATE_DELETE_VOLUME - SupportsCreateDelete bool - - // SupportsPublishVolume is true when the controller implements the - // methods required to attach and detach volumes. If this is false Nomad - // should skip the controller attachment flow. - SupportsAttachDetach bool - - // SupportsListVolumes is true when the controller implements the - // ListVolumes RPC. NOTE: This does not guarantee that attached nodes will - // be returned unless SupportsListVolumesAttachedNodes is also true. 
- SupportsListVolumes bool - - // SupportsGetCapacity indicates plugin support for GET_CAPACITY - SupportsGetCapacity bool - - // SupportsCreateDeleteSnapshot indicates plugin support for - // CREATE_DELETE_SNAPSHOT - SupportsCreateDeleteSnapshot bool - - // SupportsListSnapshots indicates plugin support for LIST_SNAPSHOTS - SupportsListSnapshots bool - - // SupportsClone indicates plugin support for CLONE_VOLUME - SupportsClone bool - - // SupportsReadOnlyAttach is set to true when the controller returns the - // ATTACH_READONLY capability. - SupportsReadOnlyAttach bool - - // SupportsExpand indicates plugin support for EXPAND_VOLUME - SupportsExpand bool - - // SupportsListVolumesAttachedNodes indicates whether the plugin will - // return attached nodes data when making ListVolume RPCs (plugin support - // for LIST_VOLUMES_PUBLISHED_NODES) - SupportsListVolumesAttachedNodes bool - - // SupportsCondition indicates plugin support for VOLUME_CONDITION - SupportsCondition bool - - // SupportsGet indicates plugin support for GET_VOLUME - SupportsGet bool -} - -// CSIInfo is the current state of a single CSI Plugin. This is updated regularly -// as plugin health changes on the node. -type CSIInfo struct { - PluginID string - AllocID string - Healthy bool - HealthDescription string - UpdateTime time.Time - RequiresControllerPlugin bool - RequiresTopologies bool - ControllerInfo *CSIControllerInfo `json:",omitempty"` - NodeInfo *CSINodeInfo `json:",omitempty"` -} - -// DrainStrategy describes a Node's drain behavior. -type DrainStrategy struct { - // DrainSpec is the user declared drain specification - DrainSpec - - // ForceDeadline is the deadline time for the drain after which drains will - // be forced - ForceDeadline time.Time - - // StartedAt is the time the drain process started - StartedAt time.Time -} - -// DrainSpec describes a Node's drain behavior. -type DrainSpec struct { - // Deadline is the duration after StartTime when the remaining - // allocations on a draining Node should be told to stop. - Deadline time.Duration - - // IgnoreSystemJobs allows systems jobs to remain on the node even though it - // has been marked for draining. - IgnoreSystemJobs bool -} - -func (d *DrainStrategy) Equal(o *DrainStrategy) bool { - if d == nil || o == nil { - return d == o - } - - if d.ForceDeadline != o.ForceDeadline { - return false - } - if d.Deadline != o.Deadline { - return false - } - if d.IgnoreSystemJobs != o.IgnoreSystemJobs { - return false - } - - return true -} - -// String returns a human readable version of the drain strategy. 
-func (d *DrainStrategy) String() string { - if d.IgnoreSystemJobs { - return fmt.Sprintf("drain ignoring system jobs and deadline at %s", d.ForceDeadline) - } - return fmt.Sprintf("drain with deadline at %s", d.ForceDeadline) -} - -const ( - NodeEventSubsystemDrain = "Drain" - NodeEventSubsystemDriver = "Driver" - NodeEventSubsystemHeartbeat = "Heartbeat" - NodeEventSubsystemCluster = "Cluster" -) - -// NodeEvent is a single unit representing a node’s state change -type NodeEvent struct { - Message string - Subsystem string - Details map[string]string - Timestamp time.Time - CreateIndex uint64 -} - -// HostStats represents resource usage stats of the host running a Nomad client -type HostStats struct { - Memory *HostMemoryStats - CPU []*HostCPUStats - DiskStats []*HostDiskStats - DeviceStats []*DeviceGroupStats - Uptime uint64 - CPUTicksConsumed float64 -} - -type HostMemoryStats struct { - Total uint64 - Available uint64 - Used uint64 - Free uint64 -} - -type HostCPUStats struct { - CPU string - User float64 - System float64 - Idle float64 -} - -type HostDiskStats struct { - Device string - Mountpoint string - Size uint64 - Used uint64 - Available uint64 - UsedPercent float64 - InodesUsedPercent float64 -} - -// DeviceGroupStats contains statistics for each device of a particular -// device group, identified by the vendor, type and name of the device. -type DeviceGroupStats struct { - Vendor string - Type string - Name string - - // InstanceStats is a mapping of each device ID to its statistics. - InstanceStats map[string]*DeviceStats -} - -// DeviceStats is the statistics for an individual device -type DeviceStats struct { - // Summary exposes a single summary metric that should be the most - // informative to users. - Summary *StatValue - - // Stats contains the verbose statistics for the device. - Stats *StatObject - - // Timestamp is the time the statistics were collected. - Timestamp time.Time -} - -// StatObject is a collection of statistics either exposed at the top -// level or via nested StatObjects. -type StatObject struct { - // Nested is a mapping of object name to a nested stats object. - Nested map[string]*StatObject - - // Attributes is a mapping of statistic name to its value. - Attributes map[string]*StatValue -} - -// StatValue exposes the values of a particular statistic. The value may be of -// type float, integer, string or boolean. Numeric types can be exposed as a -// single value or as a fraction. -type StatValue struct { - // FloatNumeratorVal exposes a floating point value. If denominator is set - // it is assumed to be a fractional value, otherwise it is a scalar. - FloatNumeratorVal *float64 `json:",omitempty"` - FloatDenominatorVal *float64 `json:",omitempty"` - - // IntNumeratorVal exposes a int value. If denominator is set it is assumed - // to be a fractional value, otherwise it is a scalar. - IntNumeratorVal *int64 `json:",omitempty"` - IntDenominatorVal *int64 `json:",omitempty"` - - // StringVal exposes a string value. These are likely annotations. - StringVal *string `json:",omitempty"` - - // BoolVal exposes a boolean statistic. - BoolVal *bool `json:",omitempty"` - - // Unit gives the unit type: °F, %, MHz, MB, etc. - Unit string `json:",omitempty"` - - // Desc provides a human readable description of the statistic. 
- Desc string `json:",omitempty"` -} - -func (v *StatValue) String() string { - switch { - case v == nil: - return "" - case v.BoolVal != nil: - return strconv.FormatBool(*v.BoolVal) - case v.StringVal != nil: - return *v.StringVal - case v.FloatNumeratorVal != nil: - str := formatFloat(*v.FloatNumeratorVal, 3) - if v.FloatDenominatorVal != nil { - str += " / " + formatFloat(*v.FloatDenominatorVal, 3) - } - - if v.Unit != "" { - str += " " + v.Unit - } - return str - case v.IntNumeratorVal != nil: - str := strconv.FormatInt(*v.IntNumeratorVal, 10) - if v.IntDenominatorVal != nil { - str += " / " + strconv.FormatInt(*v.IntDenominatorVal, 10) - } - - if v.Unit != "" { - str += " " + v.Unit - } - return str - default: - return "" - } -} - -// NodeListStub is a subset of information returned during -// node list operations. -type NodeListStub struct { - Address string - ID string - Attributes map[string]string `json:",omitempty"` - Datacenter string - Name string - NodeClass string - Version string - Drain bool - SchedulingEligibility string - Status string - StatusDescription string - Drivers map[string]*DriverInfo - NodeResources *NodeResources `json:",omitempty"` - ReservedResources *NodeReservedResources `json:",omitempty"` - LastDrain *DrainMetadata - CreateIndex uint64 - ModifyIndex uint64 -} - -// NodeIndexSort reverse sorts nodes by CreateIndex -type NodeIndexSort []*NodeListStub - -func (n NodeIndexSort) Len() int { - return len(n) -} - -func (n NodeIndexSort) Less(i, j int) bool { - return n[i].CreateIndex > n[j].CreateIndex -} - -func (n NodeIndexSort) Swap(i, j int) { - n[i], n[j] = n[j], n[i] -} - -// nodeEvalResponse is used to decode a force-eval. -type nodeEvalResponse struct { - EvalID string -} - -// AllocationSort reverse sorts allocs by CreateIndex. -type AllocationSort []*Allocation - -func (a AllocationSort) Len() int { - return len(a) -} - -func (a AllocationSort) Less(i, j int) bool { - return a[i].CreateIndex > a[j].CreateIndex -} - -func (a AllocationSort) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go deleted file mode 100644 index 8a3f74de..00000000 --- a/vendor/github.com/hashicorp/nomad/api/operator.go +++ /dev/null @@ -1,361 +0,0 @@ -package api - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" - "time" -) - -// Operator can be used to perform low-level operator tasks for Nomad. -type Operator struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. -func (c *Client) Operator() *Operator { - return &Operator{c} -} - -// RaftServer has information about a server in the Raft configuration. -type RaftServer struct { - // ID is the unique ID for the server. These are currently the same - // as the address, but they will be changed to a real GUID in a future - // release of Nomad. - ID string - - // Node is the node name of the server, as known by Nomad, or this - // will be set to "(unknown)" otherwise. - Node string - - // Address is the IP:port of the server, used for Raft communications. - Address string - - // Leader is true if this server is the current cluster leader. - Leader bool - - // Voter is true if this server has a vote in the cluster. This might - // be false if the server is staging and still coming online, or if - // it's a non-voting server, which will be added in a future release of - // Nomad. 
- Voter bool - - // RaftProtocol is the version of the Raft protocol spoken by this server. - RaftProtocol string -} - -// RaftConfiguration is returned when querying for the current Raft configuration. -type RaftConfiguration struct { - // Servers has the list of servers in the Raft configuration. - Servers []*RaftServer - - // Index has the Raft index of this configuration. - Index uint64 -} - -// RaftGetConfiguration is used to query the current Raft peer set. -func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { - r, err := op.c.newRequest("GET", "/v1/operator/raft/configuration") - if err != nil { - return nil, err - } - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out RaftConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft -// quorum but no longer known to Serf or the catalog) by address in the form of -// "IP:port". -func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { - r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - if err != nil { - return err - } - r.setWriteOptions(q) - - r.params.Set("address", address) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} - -// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft -// quorum but no longer known to Serf or the catalog) by ID. -func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { - r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - if err != nil { - return err - } - r.setWriteOptions(q) - - r.params.Set("id", id) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} - -// SchedulerConfiguration is the config for controlling scheduler behavior -type SchedulerConfiguration struct { - // SchedulerAlgorithm lets you select between available scheduling algorithms. - SchedulerAlgorithm SchedulerAlgorithm - - // PreemptionConfig specifies whether to enable eviction of lower - // priority jobs to place higher priority jobs. - PreemptionConfig PreemptionConfig - - // MemoryOversubscriptionEnabled specifies whether memory oversubscription is enabled - MemoryOversubscriptionEnabled bool - - // RejectJobRegistration disables new job registrations except with a - // management ACL token - RejectJobRegistration bool - - // PauseEvalBroker stops the leader evaluation broker process from running - // until the configuration is updated and written to the Nomad servers. - PauseEvalBroker bool - - // CreateIndex/ModifyIndex store the create/modify indexes of this configuration. 
- CreateIndex uint64 - ModifyIndex uint64 -} - -// SchedulerConfigurationResponse is the response object that wraps SchedulerConfiguration -type SchedulerConfigurationResponse struct { - // SchedulerConfig contains scheduler config options - SchedulerConfig *SchedulerConfiguration - - QueryMeta -} - -// SchedulerSetConfigurationResponse is the response object used -// when updating scheduler configuration -type SchedulerSetConfigurationResponse struct { - // Updated returns whether the config was actually updated - // Only set when the request uses CAS - Updated bool - - WriteMeta -} - -// SchedulerAlgorithm is an enum string that encapsulates the valid options for a -// SchedulerConfiguration stanza's SchedulerAlgorithm. These modes will allow the -// scheduler to be user-selectable. -type SchedulerAlgorithm string - -const ( - SchedulerAlgorithmBinpack SchedulerAlgorithm = "binpack" - SchedulerAlgorithmSpread SchedulerAlgorithm = "spread" -) - -// PreemptionConfig specifies whether preemption is enabled based on scheduler type -type PreemptionConfig struct { - SystemSchedulerEnabled bool - SysBatchSchedulerEnabled bool - BatchSchedulerEnabled bool - ServiceSchedulerEnabled bool -} - -// SchedulerGetConfiguration is used to query the current Scheduler configuration. -func (op *Operator) SchedulerGetConfiguration(q *QueryOptions) (*SchedulerConfigurationResponse, *QueryMeta, error) { - var resp SchedulerConfigurationResponse - qm, err := op.c.query("/v1/operator/scheduler/configuration", &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// SchedulerSetConfiguration is used to set the current Scheduler configuration. -func (op *Operator) SchedulerSetConfiguration(conf *SchedulerConfiguration, q *WriteOptions) (*SchedulerSetConfigurationResponse, *WriteMeta, error) { - var out SchedulerSetConfigurationResponse - wm, err := op.c.write("/v1/operator/scheduler/configuration", conf, &out, q) - if err != nil { - return nil, nil, err - } - return &out, wm, nil -} - -// SchedulerCASConfiguration is used to perform a Check-And-Set update on the -// Scheduler configuration. The ModifyIndex value will be respected. Returns -// true on success or false on failures. -func (op *Operator) SchedulerCASConfiguration(conf *SchedulerConfiguration, q *WriteOptions) (*SchedulerSetConfigurationResponse, *WriteMeta, error) { - var out SchedulerSetConfigurationResponse - wm, err := op.c.write("/v1/operator/scheduler/configuration?cas="+strconv.FormatUint(conf.ModifyIndex, 10), conf, &out, q) - if err != nil { - return nil, nil, err - } - - return &out, wm, nil -} - -// Snapshot is used to capture a snapshot state of a running cluster. -// The returned reader that must be consumed fully -func (op *Operator) Snapshot(q *QueryOptions) (io.ReadCloser, error) { - r, err := op.c.newRequest("GET", "/v1/operator/snapshot") - if err != nil { - return nil, err - } - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - - digest := resp.Header.Get("Digest") - - cr, err := newChecksumValidatingReader(resp.Body, digest) - if err != nil { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - - return nil, err - } - - return cr, nil -} - -// SnapshotRestore is used to restore a running nomad cluster to an original -// state. 
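The scheduler-configuration helpers deleted in this hunk follow a read/modify/check-and-set pattern. A minimal usage sketch (not itself part of the vendored code), assuming api.NewClient and api.DefaultConfig from the same package (defined outside this hunk) and a reachable Nomad server:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api" // assumed import path for the vendored package
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // NewClient/DefaultConfig live elsewhere in this package
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Read the current scheduler configuration.
	resp, _, err := op.SchedulerGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	conf := resp.SchedulerConfig

	// Flip a flag and write it back with check-and-set semantics; the
	// ModifyIndex carried over from the read guards against concurrent updates.
	conf.MemoryOversubscriptionEnabled = true
	casResp, _, err := op.SchedulerCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updated:", casResp.Updated)
}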
-func (op *Operator) SnapshotRestore(in io.Reader, q *WriteOptions) (*WriteMeta, error) { - wm, err := op.c.write("/v1/operator/snapshot", in, nil, q) - if err != nil { - return nil, err - } - - return wm, nil -} - -type License struct { - // The unique identifier of the license - LicenseID string - - // The customer ID associated with the license - CustomerID string - - // If set, an identifier that should be used to lock the license to a - // particular site, cluster, etc. - InstallationID string - - // The time at which the license was issued - IssueTime time.Time - - // The time at which the license starts being valid - StartTime time.Time - - // The time after which the license expires - ExpirationTime time.Time - - // The time at which the license ceases to function and can - // no longer be used in any capacity - TerminationTime time.Time - - // The product the license is valid for - Product string - - // License Specific Flags - Flags map[string]interface{} - - // Modules is a list of the licensed enterprise modules - Modules []string - - // List of features enabled by the license - Features []string -} - -type LicenseReply struct { - License *License - ConfigOutdated bool - QueryMeta -} - -type ApplyLicenseOptions struct { - Force bool -} - -func (op *Operator) LicensePut(license string, q *WriteOptions) (*WriteMeta, error) { - return op.ApplyLicense(license, nil, q) -} - -func (op *Operator) ApplyLicense(license string, opts *ApplyLicenseOptions, q *WriteOptions) (*WriteMeta, error) { - r, err := op.c.newRequest("PUT", "/v1/operator/license") - if err != nil { - return nil, err - } - - if opts != nil && opts.Force { - r.params.Add("force", "true") - } - - r.setWriteOptions(q) - r.body = strings.NewReader(license) - - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - parseWriteMeta(resp, wm) - - return wm, nil -} - -func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, *QueryMeta, error) { - req, err := op.c.newRequest("GET", "/v1/operator/license") - if err != nil { - return nil, nil, err - } - req.setQueryOptions(q) - - var reply LicenseReply - rtt, resp, err := op.c.doRequest(req) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if resp.StatusCode == 204 { - return nil, nil, errors.New("Nomad Enterprise only endpoint") - } - - if resp.StatusCode != 200 { - body, _ := io.ReadAll(resp.Body) - return nil, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, body) - } - - err = json.NewDecoder(resp.Body).Decode(&reply) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - return &reply, qm, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go b/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go deleted file mode 100644 index a44befa8..00000000 --- a/vendor/github.com/hashicorp/nomad/api/operator_autopilot.go +++ /dev/null @@ -1,222 +0,0 @@ -package api - -import ( - "encoding/json" - "strconv" - "time" -) - -// AutopilotConfiguration is used for querying/setting the Autopilot configuration. -// Autopilot helps manage operator tasks related to Nomad servers like removing -// failed servers from the Raft quorum. 
-type AutopilotConfiguration struct { - // CleanupDeadServers controls whether to remove dead servers from the Raft - // peer list when a new server joins - CleanupDeadServers bool - - // LastContactThreshold is the limit on the amount of time a server can go - // without leader contact before being considered unhealthy. - LastContactThreshold time.Duration - - // MaxTrailingLogs is the amount of entries in the Raft Log that a server can - // be behind before being considered unhealthy. - MaxTrailingLogs uint64 - - // MinQuorum sets the minimum number of servers allowed in a cluster before - // autopilot can prune dead servers. - MinQuorum uint - - // ServerStabilizationTime is the minimum amount of time a server must be - // in a stable, healthy state before it can be added to the cluster. Only - // applicable with Raft protocol version 3 or higher. - ServerStabilizationTime time.Duration - - // (Enterprise-only) EnableRedundancyZones specifies whether to enable redundancy zones. - EnableRedundancyZones bool - - // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration - // strategy of waiting until enough newer-versioned servers have been added to the - // cluster before promoting them to voters. - DisableUpgradeMigration bool - - // (Enterprise-only) EnableCustomUpgrades specifies whether to enable using custom - // upgrade versions when performing migrations. - EnableCustomUpgrades bool - - // CreateIndex holds the index corresponding the creation of this configuration. - // This is a read-only field. - CreateIndex uint64 - - // ModifyIndex will be set to the index of the last update when retrieving the - // Autopilot configuration. Resubmitting a configuration with - // AutopilotCASConfiguration will perform a check-and-set operation which ensures - // there hasn't been a subsequent update since the configuration was retrieved. - ModifyIndex uint64 -} - -func (u *AutopilotConfiguration) MarshalJSON() ([]byte, error) { - type Alias AutopilotConfiguration - return json.Marshal(&struct { - LastContactThreshold string - ServerStabilizationTime string - *Alias - }{ - LastContactThreshold: u.LastContactThreshold.String(), - ServerStabilizationTime: u.ServerStabilizationTime.String(), - Alias: (*Alias)(u), - }) -} - -func (u *AutopilotConfiguration) UnmarshalJSON(data []byte) error { - type Alias AutopilotConfiguration - aux := &struct { - LastContactThreshold string - ServerStabilizationTime string - *Alias - }{ - Alias: (*Alias)(u), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.LastContactThreshold != "" { - if u.LastContactThreshold, err = time.ParseDuration(aux.LastContactThreshold); err != nil { - return err - } - } - if aux.ServerStabilizationTime != "" { - if u.ServerStabilizationTime, err = time.ParseDuration(aux.ServerStabilizationTime); err != nil { - return err - } - } - return nil -} - -// ServerHealth is the health (from the leader's point of view) of a server. -type ServerHealth struct { - // ID is the raft ID of the server. - ID string - - // Name is the node name of the server. - Name string - - // Address is the address of the server. - Address string - - // The status of the SerfHealth check for the server. - SerfStatus string - - // Version is the Nomad version of the server. - Version string - - // Leader is whether this server is currently the leader. - Leader bool - - // LastContact is the time since this node's last contact with the leader. 
- LastContact time.Duration - - // LastTerm is the highest leader term this server has a record of in its Raft log. - LastTerm uint64 - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 - - // Healthy is whether or not the server is healthy according to the current - // Autopilot config. - Healthy bool - - // Voter is whether this is a voting server. - Voter bool - - // StableSince is the last time this server's Healthy value changed. - StableSince time.Time -} - -func (u *ServerHealth) MarshalJSON() ([]byte, error) { - type Alias ServerHealth - return json.Marshal(&struct { - LastContact string - *Alias - }{ - LastContact: u.LastContact.String(), - Alias: (*Alias)(u), - }) -} - -func (u *ServerHealth) UnmarshalJSON(data []byte) error { - type Alias ServerHealth - aux := &struct { - LastContact string - *Alias - }{ - Alias: (*Alias)(u), - } - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - var err error - if aux.LastContact != "" { - if u.LastContact, err = time.ParseDuration(aux.LastContact); err != nil { - return err - } - } - return nil -} - -// OperatorHealthReply is a representation of the overall health of the cluster -type OperatorHealthReply struct { - // Healthy is true if all the servers in the cluster are healthy. - Healthy bool - - // FailureTolerance is the number of healthy servers that could be lost without - // an outage occurring. - FailureTolerance int - - // Servers holds the health of each server. - Servers []ServerHealth -} - -// AutopilotGetConfiguration is used to query the current Autopilot configuration. -func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, *QueryMeta, error) { - var resp AutopilotConfiguration - qm, err := op.c.query("/v1/operator/autopilot/configuration", &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// AutopilotSetConfiguration is used to set the current Autopilot configuration. -func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (*WriteMeta, error) { - var out bool - wm, err := op.c.write("/v1/operator/autopilot/configuration", conf, &out, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// AutopilotCASConfiguration is used to perform a Check-And-Set update on the -// Autopilot configuration. The ModifyIndex value will be respected. Returns -// true on success or false on failures. -func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, *WriteMeta, error) { - var out bool - wm, err := op.c.write("/v1/operator/autopilot/configuration?cas="+strconv.FormatUint(conf.ModifyIndex, 10), conf, &out, q) - if err != nil { - return false, nil, err - } - - return out, wm, nil -} - -// AutopilotServerHealth is used to query Autopilot's top-level view of the health -// of each Nomad server. 
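A similar read/modify/CAS round trip applies to the Autopilot configuration removed here; the custom JSON (un)marshalers above exist so that LastContactThreshold and ServerStabilizationTime travel as duration strings on the wire. An illustrative sketch under the same assumptions as the earlier example (api.NewClient / api.DefaultConfig, reachable server); AutopilotServerHealth is declared just after this point:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/nomad/api" // assumed import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	conf, _, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}

	// In Go these are plain time.Duration values; the marshalers shown above
	// convert them to strings such as "10s" for the HTTP API.
	conf.CleanupDeadServers = true
	conf.ServerStabilizationTime = 10 * time.Second
	ok, _, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cas applied:", ok)

	// Per-server health from the leader's point of view.
	health, _, err := op.AutopilotServerHealth(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range health.Servers {
		fmt.Printf("%s healthy=%v leader=%v\n", s.Name, s.Healthy, s.Leader)
	}
}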
-func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, *QueryMeta, error) { - var out OperatorHealthReply - qm, err := op.c.query("/v1/operator/autopilot/health", &out, q) - if err != nil { - return nil, nil, err - } - return &out, qm, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/operator_metrics.go b/vendor/github.com/hashicorp/nomad/api/operator_metrics.go deleted file mode 100644 index e6419819..00000000 --- a/vendor/github.com/hashicorp/nomad/api/operator_metrics.go +++ /dev/null @@ -1,87 +0,0 @@ -package api - -import ( - "io/ioutil" - "time" -) - -// MetricsSummary holds a roll-up of metrics info for a given interval -type MetricsSummary struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -type GaugeValue struct { - Name string - Hash string `json:"-"` - Value float32 - - Labels []Label `json:"-"` - DisplayLabels map[string]string `json:"Labels"` -} - -type PointValue struct { - Name string - Points []float32 -} - -type SampledValue struct { - Name string - Hash string `json:"-"` - *AggregateSample - Mean float64 - Stddev float64 - - Labels []Label `json:"-"` - DisplayLabels map[string]string `json:"Labels"` -} - -// AggregateSample is used to hold aggregate metrics -// about a sample -type AggregateSample struct { - Count int // The count of emitted pairs - Rate float64 // The values rate per time unit (usually 1 second) - Sum float64 // The sum of values - SumSq float64 `json:"-"` // The sum of squared values - Min float64 // Minimum value - Max float64 // Maximum value - LastUpdated time.Time `json:"-"` // When value was last updated -} - -type Label struct { - Name string - Value string -} - -// Metrics returns a slice of bytes containing metrics, optionally formatted as either json or prometheus -func (op *Operator) Metrics(q *QueryOptions) ([]byte, error) { - if q == nil { - q = &QueryOptions{} - } - - metricsReader, err := op.c.rawQuery("/v1/metrics", q) - if err != nil { - return nil, err - } - - metricsBytes, err := ioutil.ReadAll(metricsReader) - if err != nil { - return nil, err - } - - return metricsBytes, nil -} - -// MetricsSummary returns a MetricsSummary struct and query metadata -func (op *Operator) MetricsSummary(q *QueryOptions) (*MetricsSummary, *QueryMeta, error) { - var resp *MetricsSummary - qm, err := op.c.query("/v1/metrics", &resp, q) - if err != nil { - return nil, nil, err - } - - return resp, qm, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/quota.go b/vendor/github.com/hashicorp/nomad/api/quota.go deleted file mode 100644 index acbd5295..00000000 --- a/vendor/github.com/hashicorp/nomad/api/quota.go +++ /dev/null @@ -1,191 +0,0 @@ -package api - -import ( - "fmt" - "sort" -) - -// Quotas is used to query the quotas endpoints. -type Quotas struct { - client *Client -} - -// Quotas returns a new handle on the quotas. 
-func (c *Client) Quotas() *Quotas { - return &Quotas{client: c} -} - -// List is used to dump all of the quota specs -func (q *Quotas) List(qo *QueryOptions) ([]*QuotaSpec, *QueryMeta, error) { - var resp []*QuotaSpec - qm, err := q.client.query("/v1/quotas", &resp, qo) - if err != nil { - return nil, nil, err - } - sort.Sort(QuotaSpecIndexSort(resp)) - return resp, qm, nil -} - -// PrefixList is used to do a PrefixList search over quota specs -func (q *Quotas) PrefixList(prefix string, qo *QueryOptions) ([]*QuotaSpec, *QueryMeta, error) { - if qo == nil { - qo = &QueryOptions{Prefix: prefix} - } else { - qo.Prefix = prefix - } - - return q.List(qo) -} - -// ListUsage is used to dump all of the quota usages -func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) { - var resp []*QuotaUsage - qm, err := q.client.query("/v1/quota-usages", &resp, qo) - if err != nil { - return nil, nil, err - } - sort.Sort(QuotaUsageIndexSort(resp)) - return resp, qm, nil -} - -// PrefixList is used to do a PrefixList search over quota usages -func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) { - if qo == nil { - qo = &QueryOptions{Prefix: prefix} - } else { - qo.Prefix = prefix - } - - return q.ListUsage(qo) -} - -// Info is used to query a single quota spec by its name. -func (q *Quotas) Info(name string, qo *QueryOptions) (*QuotaSpec, *QueryMeta, error) { - var resp QuotaSpec - qm, err := q.client.query("/v1/quota/"+name, &resp, qo) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Usage is used to query a single quota usage by its name. -func (q *Quotas) Usage(name string, qo *QueryOptions) (*QuotaUsage, *QueryMeta, error) { - var resp QuotaUsage - qm, err := q.client.query("/v1/quota/usage/"+name, &resp, qo) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Register is used to register a quota spec. -func (q *Quotas) Register(spec *QuotaSpec, qo *WriteOptions) (*WriteMeta, error) { - wm, err := q.client.write("/v1/quota", spec, nil, qo) - if err != nil { - return nil, err - } - return wm, nil -} - -// Delete is used to delete a quota spec -func (q *Quotas) Delete(quota string, qo *WriteOptions) (*WriteMeta, error) { - wm, err := q.client.delete(fmt.Sprintf("/v1/quota/%s", quota), nil, nil, qo) - if err != nil { - return nil, err - } - return wm, nil -} - -// QuotaSpec specifies the allowed resource usage across regions. -type QuotaSpec struct { - // Name is the name for the quota object - Name string - - // Description is an optional description for the quota object - Description string - - // Limits is the set of quota limits encapsulated by this quota object. Each - // limit applies quota in a particular region and in the future over a - // particular priority range and datacenter set. - Limits []*QuotaLimit - - // Raft indexes to track creation and modification - CreateIndex uint64 - ModifyIndex uint64 -} - -// QuotaLimit describes the resource limit in a particular region. -type QuotaLimit struct { - // Region is the region in which this limit has affect - Region string - - // RegionLimit is the quota limit that applies to any allocation within a - // referencing namespace in the region. A value of zero is treated as - // unlimited and a negative value is treated as fully disallowed. This is - // useful for once we support GPUs - RegionLimit *Resources - - // VariablesLimit is the maximum total size of all variables - // Variable.EncryptedData. 
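The quota client removed in this hunk wraps the Enterprise quota endpoints. A read-only sketch of listing specs and fetching one back, under the same client-construction assumption as above (illustrative only):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api" // assumed import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	quotas := client.Quotas()

	// List returns specs sorted by CreateIndex (highest first), per
	// QuotaSpecIndexSort further down in this file.
	specs, _, err := quotas.List(nil)
	if err != nil {
		log.Fatal(err) // quota endpoints require Nomad Enterprise
	}
	for _, spec := range specs {
		fmt.Println(spec.Name, spec.Description)

		// Info fetches a single spec by name.
		full, _, err := quotas.Info(spec.Name, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("limits:", len(full.Limits))
	}
}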
A value of zero is treated as unlimited and a - // negative value is treated as fully disallowed. - VariablesLimit *int `mapstructure:"variables_limit" hcl:"variables_limit,optional"` - - // Hash is the hash of the object and is used to make replication efficient. - Hash []byte -} - -// QuotaUsage is the resource usage of a Quota -type QuotaUsage struct { - Name string - Used map[string]*QuotaLimit - CreateIndex uint64 - ModifyIndex uint64 -} - -// QuotaSpecIndexSort is a wrapper to sort QuotaSpecs by CreateIndex. We -// reverse the test so that we get the highest index first. -type QuotaSpecIndexSort []*QuotaSpec - -func (q QuotaSpecIndexSort) Len() int { - return len(q) -} - -func (q QuotaSpecIndexSort) Less(i, j int) bool { - return q[i].CreateIndex > q[j].CreateIndex -} - -func (q QuotaSpecIndexSort) Swap(i, j int) { - q[i], q[j] = q[j], q[i] -} - -// QuotaUsageIndexSort is a wrapper to sort QuotaUsages by CreateIndex. We -// reverse the test so that we get the highest index first. -type QuotaUsageIndexSort []*QuotaUsage - -func (q QuotaUsageIndexSort) Len() int { - return len(q) -} - -func (q QuotaUsageIndexSort) Less(i, j int) bool { - return q[i].CreateIndex > q[j].CreateIndex -} - -func (q QuotaUsageIndexSort) Swap(i, j int) { - q[i], q[j] = q[j], q[i] -} - -// QuotaLimitSort is a wrapper to sort QuotaLimits -type QuotaLimitSort []*QuotaLimit - -func (q QuotaLimitSort) Len() int { - return len(q) -} - -func (q QuotaLimitSort) Less(i, j int) bool { - return q[i].Region < q[j].Region -} - -func (q QuotaLimitSort) Swap(i, j int) { - q[i], q[j] = q[j], q[i] -} diff --git a/vendor/github.com/hashicorp/nomad/api/raw.go b/vendor/github.com/hashicorp/nomad/api/raw.go deleted file mode 100644 index 077f87dd..00000000 --- a/vendor/github.com/hashicorp/nomad/api/raw.go +++ /dev/null @@ -1,38 +0,0 @@ -package api - -import "io" - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Nomad conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Response is used to make a GET request against an endpoint and returns the -// response body -func (raw *Raw) Response(endpoint string, q *QueryOptions) (io.ReadCloser, error) { - return raw.c.rawQuery(endpoint, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Nomad conventions. -func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} - -// Delete is used to do a DELETE request against an endpoint -// and serialize/deserialized using the standard Nomad conventions. -func (raw *Raw) Delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.delete(endpoint, nil, out, q) -} diff --git a/vendor/github.com/hashicorp/nomad/api/recommendations.go b/vendor/github.com/hashicorp/nomad/api/recommendations.go deleted file mode 100644 index 47414f24..00000000 --- a/vendor/github.com/hashicorp/nomad/api/recommendations.go +++ /dev/null @@ -1,123 +0,0 @@ -package api - -// Recommendations is used to query the recommendations endpoints. 
-type Recommendations struct { - client *Client -} - -// Recommendations returns a new handle on the recommendations endpoints. -func (c *Client) Recommendations() *Recommendations { - return &Recommendations{client: c} -} - -// List is used to dump all of the recommendations in the cluster -func (r *Recommendations) List(q *QueryOptions) ([]*Recommendation, *QueryMeta, error) { - var resp []*Recommendation - qm, err := r.client.query("/v1/recommendations", &resp, q) - if err != nil { - return nil, qm, err - } - return resp, qm, nil -} - -// Info is used to return information on a single recommendation -func (r *Recommendations) Info(id string, q *QueryOptions) (*Recommendation, *QueryMeta, error) { - var resp Recommendation - qm, err := r.client.query("/v1/recommendation/"+id, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, qm, nil -} - -// Upsert is used to create or update a recommendation -func (r *Recommendations) Upsert(rec *Recommendation, q *WriteOptions) (*Recommendation, *WriteMeta, error) { - var resp Recommendation - wm, err := r.client.write("/v1/recommendation", rec, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -// Delete is used to delete a list of recommendations -func (r *Recommendations) Delete(ids []string, q *WriteOptions) (*WriteMeta, error) { - req := &RecommendationApplyRequest{ - Apply: []string{}, - Dismiss: ids, - } - wm, err := r.client.write("/v1/recommendations/apply", req, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Apply is used to apply a set of recommendations -func (r *Recommendations) Apply(ids []string, policyOverride bool) ( - *RecommendationApplyResponse, *WriteMeta, error) { - req := &RecommendationApplyRequest{ - Apply: ids, - PolicyOverride: policyOverride, - } - var resp RecommendationApplyResponse - wm, err := r.client.write("/v1/recommendations/apply", req, &resp, nil) - if err != nil { - return nil, nil, err - } - resp.WriteMeta = *wm - return &resp, wm, nil -} - -// Recommendation is used to serialize a recommendation. -type Recommendation struct { - ID string - Region string - Namespace string - JobID string - JobVersion uint64 - Group string - Task string - Resource string - Value int - Current int - Meta map[string]interface{} - Stats map[string]float64 - EnforceVersion bool - - SubmitTime int64 - - CreateIndex uint64 - ModifyIndex uint64 -} - -// RecommendationApplyRequest is used to apply and/or dismiss a set of recommendations -type RecommendationApplyRequest struct { - Apply []string - Dismiss []string - PolicyOverride bool -} - -// RecommendationApplyResponse is used to apply a set of recommendations -type RecommendationApplyResponse struct { - UpdatedJobs []*SingleRecommendationApplyResult - Errors []*SingleRecommendationApplyError - WriteMeta -} - -type SingleRecommendationApplyResult struct { - Namespace string - JobID string - JobModifyIndex uint64 - EvalID string - EvalCreateIndex uint64 - Warnings string - Recommendations []string -} - -type SingleRecommendationApplyError struct { - Namespace string - JobID string - Recommendations []string - Error string -} diff --git a/vendor/github.com/hashicorp/nomad/api/regions.go b/vendor/github.com/hashicorp/nomad/api/regions.go deleted file mode 100644 index 98df011d..00000000 --- a/vendor/github.com/hashicorp/nomad/api/regions.go +++ /dev/null @@ -1,24 +0,0 @@ -package api - -import "sort" - -// Regions is used to query the regions in the cluster. 
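For the recommendations endpoints deleted above (also Enterprise-only), a rough usage sketch under the same assumptions: list the pending recommendations, then apply them in a single request.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api" // assumed import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	recs := client.Recommendations()

	list, _, err := recs.List(nil)
	if err != nil {
		log.Fatal(err)
	}
	ids := make([]string, 0, len(list))
	for _, r := range list {
		fmt.Printf("%s: %s/%s %s %d -> %d\n", r.ID, r.Group, r.Task, r.Resource, r.Current, r.Value)
		ids = append(ids, r.ID)
	}
	if len(ids) > 0 {
		// false: do not override the job's scaling/update policy.
		result, _, err := recs.Apply(ids, false)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("updated jobs:", len(result.UpdatedJobs))
	}
}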
-type Regions struct { - client *Client -} - -// Regions returns a handle on the regions endpoints. -func (c *Client) Regions() *Regions { - return &Regions{client: c} -} - -// List returns a list of all of the regions from the server -// that serves the request. It is never forwarded to a leader. -func (r *Regions) List() ([]string, error) { - var resp []string - if _, err := r.client.query("/v1/regions", &resp, nil); err != nil { - return nil, err - } - sort.Strings(resp) - return resp, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go deleted file mode 100644 index 43f6bbe8..00000000 --- a/vendor/github.com/hashicorp/nomad/api/resources.go +++ /dev/null @@ -1,277 +0,0 @@ -package api - -import ( - "strconv" -) - -// Resources encapsulates the required resources of -// a given task or task group. -type Resources struct { - CPU *int `hcl:"cpu,optional"` - Cores *int `hcl:"cores,optional"` - MemoryMB *int `mapstructure:"memory" hcl:"memory,optional"` - MemoryMaxMB *int `mapstructure:"memory_max" hcl:"memory_max,optional"` - DiskMB *int `mapstructure:"disk" hcl:"disk,optional"` - Networks []*NetworkResource `hcl:"network,block"` - Devices []*RequestedDevice `hcl:"device,block"` - - // COMPAT(0.10) - // XXX Deprecated. Please do not use. The field will be removed in Nomad - // 0.10 and is only being kept to allow any references to be removed before - // then. - IOPS *int `hcl:"iops,optional"` -} - -// Canonicalize will supply missing values in the cases -// where they are not provided. -func (r *Resources) Canonicalize() { - defaultResources := DefaultResources() - if r.Cores == nil { - r.Cores = defaultResources.Cores - - // only set cpu to the default value if it and cores is not defined - if r.CPU == nil { - r.CPU = defaultResources.CPU - } - } - - // CPU will be set to the default if cores is nil above. - // If cpu is nil here then cores has been set and cpu should be 0 - if r.CPU == nil { - r.CPU = pointerOf(0) - } - - if r.MemoryMB == nil { - r.MemoryMB = defaultResources.MemoryMB - } - for _, d := range r.Devices { - d.Canonicalize() - } -} - -// DefaultResources is a small resources object that contains the -// default resources requests that we will provide to an object. -// --- THIS FUNCTION IS REPLICATED IN nomad/structs/structs.go -// and should be kept in sync. -func DefaultResources() *Resources { - return &Resources{ - CPU: pointerOf(100), - Cores: pointerOf(0), - MemoryMB: pointerOf(300), - } -} - -// MinResources is a small resources object that contains the -// absolute minimum resources that we will provide to an object. -// This should not be confused with the defaults which are -// provided in DefaultResources() --- THIS LOGIC IS REPLICATED -// IN nomad/structs/structs.go and should be kept in sync. -func MinResources() *Resources { - return &Resources{ - CPU: pointerOf(1), - Cores: pointerOf(0), - MemoryMB: pointerOf(10), - } -} - -// Merge merges this resource with another resource. 
-func (r *Resources) Merge(other *Resources) { - if other == nil { - return - } - if other.CPU != nil { - r.CPU = other.CPU - } - if other.MemoryMB != nil { - r.MemoryMB = other.MemoryMB - } - if other.DiskMB != nil { - r.DiskMB = other.DiskMB - } - if len(other.Networks) != 0 { - r.Networks = other.Networks - } - if len(other.Devices) != 0 { - r.Devices = other.Devices - } -} - -type Port struct { - Label string `hcl:",label"` - Value int `mapstructure:"static" hcl:"static,optional"` - To int `mapstructure:"to" hcl:"to,optional"` - HostNetwork string `mapstructure:"host_network" hcl:"host_network,optional"` -} - -type DNSConfig struct { - Servers []string `mapstructure:"servers" hcl:"servers,optional"` - Searches []string `mapstructure:"searches" hcl:"searches,optional"` - Options []string `mapstructure:"options" hcl:"options,optional"` -} - -// NetworkResource is used to describe required network -// resources of a given task. -type NetworkResource struct { - Mode string `hcl:"mode,optional"` - Device string `hcl:"device,optional"` - CIDR string `hcl:"cidr,optional"` - IP string `hcl:"ip,optional"` - DNS *DNSConfig `hcl:"dns,block"` - ReservedPorts []Port `hcl:"reserved_ports,block"` - DynamicPorts []Port `hcl:"port,block"` - Hostname string `hcl:"hostname,optional"` - - // COMPAT(0.13) - // XXX Deprecated. Please do not use. The field will be removed in Nomad - // 0.13 and is only being kept to allow any references to be removed before - // then. - MBits *int `hcl:"mbits,optional"` -} - -// COMPAT(0.13) -// XXX Deprecated. Please do not use. The method will be removed in Nomad -// 0.13 and is only being kept to allow any references to be removed before -// then. -func (n *NetworkResource) Megabits() int { - if n == nil || n.MBits == nil { - return 0 - } - return *n.MBits -} - -func (n *NetworkResource) Canonicalize() { - // COMPAT(0.13) - // Noop to maintain backwards compatibility -} - -func (n *NetworkResource) HasPorts() bool { - if n == nil { - return false - } - - return len(n.ReservedPorts)+len(n.DynamicPorts) > 0 -} - -// NodeDeviceResource captures a set of devices sharing a common -// vendor/type/device_name tuple. -type NodeDeviceResource struct { - - // Vendor specifies the vendor of device - Vendor string - - // Type specifies the type of the device - Type string - - // Name specifies the specific model of the device - Name string - - // Instances are list of the devices matching the vendor/type/name - Instances []*NodeDevice - - Attributes map[string]*Attribute -} - -func (r NodeDeviceResource) ID() string { - return r.Vendor + "/" + r.Type + "/" + r.Name -} - -// NodeDevice is an instance of a particular device. -type NodeDevice struct { - // ID is the ID of the device. - ID string - - // Healthy captures whether the device is healthy. - Healthy bool - - // HealthDescription is used to provide a human readable description of why - // the device may be unhealthy. - HealthDescription string - - // Locality stores HW locality information for the node to optionally be - // used when making placement decisions. 
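Because the Resources fields above are pointers (so HCL can distinguish "unset" from zero), building one in Go needs explicit pointer values; the package's own pointerOf helper is unexported, so the sketch below carries a tiny local stand-in. Illustrative only, not part of the vendored file.

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api" // assumed import path
)

// intPtr is a local stand-in for the package's unexported pointerOf helper.
func intPtr(i int) *int { return &i }

func main() {
	r := &api.Resources{
		CPU:      intPtr(500),
		MemoryMB: intPtr(256),
	}

	// Canonicalize fills in defaults for any unset fields (here Cores).
	r.Canonicalize()

	// Merge overlays any non-nil fields from another Resources value.
	r.Merge(&api.Resources{DiskMB: intPtr(1024)})

	fmt.Println(*r.CPU, *r.MemoryMB, *r.DiskMB, *r.Cores)
}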
- Locality *NodeDeviceLocality -} - -// Attribute is used to describe the value of an attribute, optionally -// specifying units -type Attribute struct { - // Float is the float value for the attribute - FloatVal *float64 `json:"Float,omitempty"` - - // Int is the int value for the attribute - IntVal *int64 `json:"Int,omitempty"` - - // String is the string value for the attribute - StringVal *string `json:"String,omitempty"` - - // Bool is the bool value for the attribute - BoolVal *bool `json:"Bool,omitempty"` - - // Unit is the optional unit for the set int or float value - Unit string -} - -func (a Attribute) String() string { - switch { - case a.FloatVal != nil: - str := formatFloat(*a.FloatVal, 3) - if a.Unit != "" { - str += " " + a.Unit - } - return str - case a.IntVal != nil: - str := strconv.FormatInt(*a.IntVal, 10) - if a.Unit != "" { - str += " " + a.Unit - } - return str - case a.StringVal != nil: - return *a.StringVal - case a.BoolVal != nil: - return strconv.FormatBool(*a.BoolVal) - default: - return "" - } -} - -// NodeDeviceLocality stores information about the devices hardware locality on -// the node. -type NodeDeviceLocality struct { - // PciBusID is the PCI Bus ID for the device. - PciBusID string -} - -// RequestedDevice is used to request a device for a task. -type RequestedDevice struct { - // Name is the request name. The possible values are as follows: - // * : A single value only specifies the type of request. - // * /: A single slash delimiter assumes the vendor and type of device is specified. - // * //: Two slash delimiters assume vendor, type and specific model are specified. - // - // Examples are as follows: - // * "gpu" - // * "nvidia/gpu" - // * "nvidia/gpu/GTX2080Ti" - Name string `hcl:",label"` - - // Count is the number of requested devices - Count *uint64 `hcl:"count,optional"` - - // Constraints are a set of constraints to apply when selecting the device - // to use. - Constraints []*Constraint `hcl:"constraint,block"` - - // Affinities are a set of affinites to apply when selecting the device - // to use. - Affinities []*Affinity `hcl:"affinity,block"` -} - -func (d *RequestedDevice) Canonicalize() { - if d.Count == nil { - d.Count = pointerOf(uint64(1)) - } - - for _, a := range d.Affinities { - a.Canonicalize() - } -} diff --git a/vendor/github.com/hashicorp/nomad/api/scaling.go b/vendor/github.com/hashicorp/nomad/api/scaling.go deleted file mode 100644 index 32259c9f..00000000 --- a/vendor/github.com/hashicorp/nomad/api/scaling.go +++ /dev/null @@ -1,119 +0,0 @@ -package api - -const ( - // ScalingPolicyTypeHorizontal indicates a policy that does horizontal scaling. - ScalingPolicyTypeHorizontal = "horizontal" -) - -// Scaling is used to query scaling-related API endpoints -type Scaling struct { - client *Client -} - -// Scaling returns a handle on the scaling endpoints. 
-func (c *Client) Scaling() *Scaling { - return &Scaling{client: c} -} - -func (s *Scaling) ListPolicies(q *QueryOptions) ([]*ScalingPolicyListStub, *QueryMeta, error) { - var resp []*ScalingPolicyListStub - qm, err := s.client.query("/v1/scaling/policies", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -func (s *Scaling) GetPolicy(id string, q *QueryOptions) (*ScalingPolicy, *QueryMeta, error) { - var policy ScalingPolicy - qm, err := s.client.query("/v1/scaling/policy/"+id, &policy, q) - if err != nil { - return nil, nil, err - } - return &policy, qm, nil -} - -func (p *ScalingPolicy) Canonicalize(taskGroupCount int) { - if p.Enabled == nil { - p.Enabled = pointerOf(true) - } - if p.Min == nil { - var m int64 = int64(taskGroupCount) - p.Min = &m - } - if p.Type == "" { - p.Type = ScalingPolicyTypeHorizontal - } -} - -// ScalingRequest is the payload for a generic scaling action -type ScalingRequest struct { - Count *int64 - Target map[string]string - Message string - Error bool - Meta map[string]interface{} - WriteRequest - // this is effectively a job update, so we need the ability to override policy. - PolicyOverride bool -} - -// ScalingPolicy is the user-specified API object for an autoscaling policy -type ScalingPolicy struct { - /* fields set by user in HCL config */ - - Min *int64 `hcl:"min,optional"` - Max *int64 `hcl:"max,optional"` - Policy map[string]interface{} `hcl:"policy,block"` - Enabled *bool `hcl:"enabled,optional"` - Type string `hcl:"type,optional"` - - /* fields set by server */ - - ID string - Namespace string - Target map[string]string - CreateIndex uint64 - ModifyIndex uint64 -} - -// ScalingPolicyListStub is used to return a subset of scaling policy information -// for the scaling policy list -type ScalingPolicyListStub struct { - ID string - Enabled bool - Type string - Target map[string]string - CreateIndex uint64 - ModifyIndex uint64 -} - -// JobScaleStatusResponse is used to return information about job scaling status -type JobScaleStatusResponse struct { - JobID string - Namespace string - JobCreateIndex uint64 - JobModifyIndex uint64 - JobStopped bool - TaskGroups map[string]TaskGroupScaleStatus -} - -type TaskGroupScaleStatus struct { - Desired int - Placed int - Running int - Healthy int - Unhealthy int - Events []ScalingEvent -} - -type ScalingEvent struct { - Count *int64 - PreviousCount int64 - Error bool - Message string - Meta map[string]interface{} - EvalID *string - Time uint64 - CreateIndex uint64 -} diff --git a/vendor/github.com/hashicorp/nomad/api/search.go b/vendor/github.com/hashicorp/nomad/api/search.go deleted file mode 100644 index 3b020827..00000000 --- a/vendor/github.com/hashicorp/nomad/api/search.go +++ /dev/null @@ -1,97 +0,0 @@ -package api - -import ( - "github.com/hashicorp/nomad/api/contexts" -) - -type Search struct { - client *Client -} - -// Search returns a handle on the Search endpoints -func (c *Client) Search() *Search { - return &Search{client: c} -} - -// PrefixSearch returns a set of matches for a particular context and prefix. 
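The scaling endpoints removed in this hunk expose the policies that the external autoscaler consumes. A short read-only sketch, same assumptions as the earlier examples:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api" // assumed import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	scaling := client.Scaling()

	stubs, _, err := scaling.ListPolicies(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, stub := range stubs {
		// GetPolicy returns the full policy, including the opaque Policy block
		// that is interpreted by the autoscaler rather than by Nomad itself.
		policy, _, err := scaling.GetPolicy(stub.ID, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s type=%s enabled=%v target=%v\n", policy.ID, policy.Type, stub.Enabled, policy.Target)
	}
}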
-func (s *Search) PrefixSearch(prefix string, context contexts.Context, q *QueryOptions) (*SearchResponse, *QueryMeta, error) { - var resp SearchResponse - req := &SearchRequest{Prefix: prefix, Context: context} - - qm, err := s.client.putQuery("/v1/search", req, &resp, q) - if err != nil { - return nil, nil, err - } - - return &resp, qm, nil -} - -type SearchResponse struct { - Matches map[contexts.Context][]string - Truncations map[contexts.Context]bool - QueryMeta -} - -type SearchRequest struct { - Prefix string - Context contexts.Context - QueryOptions -} - -// FuzzySearch returns a set of matches for a given context and string. -func (s *Search) FuzzySearch(text string, context contexts.Context, q *QueryOptions) (*FuzzySearchResponse, *QueryMeta, error) { - var resp FuzzySearchResponse - - req := &FuzzySearchRequest{ - Context: context, - Text: text, - } - - qm, err := s.client.putQuery("/v1/search/fuzzy", req, &resp, q) - if err != nil { - return nil, nil, err - } - - return &resp, qm, nil -} - -// FuzzyMatch is used to describe the ID of an object which may be a machine -// readable UUID or a human readable Name. If the object is a component of a Job, -// the Scope is a list of IDs starting from Namespace down to the parent object of -// ID. -// -// e.g. A Task-level service would have scope like, -// ["", "", "", ""] -type FuzzyMatch struct { - ID string // ID is UUID or Name of object - Scope []string `json:",omitempty"` // IDs of parent objects -} - -// FuzzySearchResponse is used to return fuzzy matches and information about -// whether the match list is truncated specific to each type of searchable Context. -type FuzzySearchResponse struct { - // Matches is a map of Context types to IDs which fuzzy match a specified query. - Matches map[contexts.Context][]FuzzyMatch - - // Truncations indicates whether the matches for a particular Context have - // been truncated. - Truncations map[contexts.Context]bool - - QueryMeta -} - -// FuzzySearchRequest is used to parameterize a fuzzy search request, and returns -// a list of matches made up of jobs, allocations, evaluations, and/or nodes, -// along with whether or not the information returned is truncated. -type FuzzySearchRequest struct { - // Text is what names are fuzzy-matched to. E.g. if the given text were - // "py", potential matches might be "python", "mypy", etc. of jobs, nodes, - // allocs, groups, services, commands, images, classes. - Text string - - // Context is the type that can be matched against. A Context of "all" indicates - // all Contexts types are queried for matching. - Context contexts.Context - - QueryOptions -} diff --git a/vendor/github.com/hashicorp/nomad/api/sentinel.go b/vendor/github.com/hashicorp/nomad/api/sentinel.go deleted file mode 100644 index 74c88dd6..00000000 --- a/vendor/github.com/hashicorp/nomad/api/sentinel.go +++ /dev/null @@ -1,81 +0,0 @@ -package api - -import ( - "errors" -) - -// SentinelPolicies is used to query the Sentinel Policy endpoints. -type SentinelPolicies struct { - client *Client -} - -// SentinelPolicies returns a new handle on the Sentinel policies. -func (c *Client) SentinelPolicies() *SentinelPolicies { - return &SentinelPolicies{client: c} -} - -// List is used to dump all of the policies. 
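A sketch of the prefix and fuzzy search calls deleted above. The contexts.Jobs constant is assumed to exist in the api/contexts package imported by this file; client construction is assumed as in the earlier examples.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api" // assumed import path
	"github.com/hashicorp/nomad/api/contexts"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	search := client.Search()

	// Prefix search over job IDs.
	resp, _, err := search.PrefixSearch("exam", contexts.Jobs, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("job matches:", resp.Matches[contexts.Jobs])
	fmt.Println("truncated:", resp.Truncations[contexts.Jobs])

	// Fuzzy search matches names rather than IDs and returns scoped matches.
	fresp, _, err := search.FuzzySearch("redis", contexts.Jobs, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range fresp.Matches[contexts.Jobs] {
		fmt.Println(m.ID, m.Scope)
	}
}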
-func (a *SentinelPolicies) List(q *QueryOptions) ([]*SentinelPolicyListStub, *QueryMeta, error) { - var resp []*SentinelPolicyListStub - qm, err := a.client.query("/v1/sentinel/policies", &resp, q) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// Upsert is used to create or update a policy -func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*WriteMeta, error) { - if policy == nil || policy.Name == "" { - return nil, errors.New("missing policy name") - } - wm, err := a.client.write("/v1/sentinel/policy/"+policy.Name, policy, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Delete is used to delete a policy -func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { - if policyName == "" { - return nil, errors.New("missing policy name") - } - wm, err := a.client.delete("/v1/sentinel/policy/"+policyName, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Info is used to query a specific policy -func (a *SentinelPolicies) Info(policyName string, q *QueryOptions) (*SentinelPolicy, *QueryMeta, error) { - if policyName == "" { - return nil, nil, errors.New("missing policy name") - } - var resp SentinelPolicy - wm, err := a.client.query("/v1/sentinel/policy/"+policyName, &resp, q) - if err != nil { - return nil, nil, err - } - return &resp, wm, nil -} - -type SentinelPolicy struct { - Name string - Description string - Scope string - EnforcementLevel string - Policy string - CreateIndex uint64 - ModifyIndex uint64 -} - -type SentinelPolicyListStub struct { - Name string - Description string - Scope string - EnforcementLevel string - CreateIndex uint64 - ModifyIndex uint64 -} diff --git a/vendor/github.com/hashicorp/nomad/api/services.go b/vendor/github.com/hashicorp/nomad/api/services.go deleted file mode 100644 index 8d9b4157..00000000 --- a/vendor/github.com/hashicorp/nomad/api/services.go +++ /dev/null @@ -1,317 +0,0 @@ -package api - -import ( - "fmt" - "net/url" - "time" -) - -// ServiceRegistration is an instance of a single allocation advertising itself -// as a named service with a specific address. Each registration is constructed -// from the job specification Service block. Whether the service is registered -// within Nomad, and therefore generates a ServiceRegistration is controlled by -// the Service.Provider parameter. -type ServiceRegistration struct { - - // ID is the unique identifier for this registration. It currently follows - // the Consul service registration format to provide consistency between - // the two solutions. - ID string - - // ServiceName is the human friendly identifier for this service - // registration. - ServiceName string - - // Namespace represents the namespace within which this service is - // registered. - Namespace string - - // NodeID is Node.ID on which this service registration is currently - // running. - NodeID string - - // Datacenter is the DC identifier of the node as identified by - // Node.Datacenter. - Datacenter string - - // JobID is Job.ID and represents the job which contained the service block - // which resulted in this service registration. - JobID string - - // AllocID is Allocation.ID and represents the allocation within which this - // service is running. - AllocID string - - // Tags are determined from either Service.Tags or Service.CanaryTags and - // help identify this service. Tags can also be used to perform lookups of - // services depending on their state and role. 
- Tags []string - - // Address is the IP address of this service registration. This information - // comes from the client and is not guaranteed to be routable; this depends - // on cluster network topology. - Address string - - // Port is the port number on which this service registration is bound. It - // is determined by a combination of factors on the client. - Port int - - CreateIndex uint64 - ModifyIndex uint64 -} - -// ServiceRegistrationListStub represents all service registrations held within a -// single namespace. -type ServiceRegistrationListStub struct { - - // Namespace details the namespace in which these services have been - // registered. - Namespace string - - // Services is a list of services found within the namespace. - Services []*ServiceRegistrationStub -} - -// ServiceRegistrationStub is the stub object describing an individual -// namespaced service. The object is built in a manner which would allow us to -// add additional fields in the future, if we wanted. -type ServiceRegistrationStub struct { - - // ServiceName is the human friendly name for this service as specified - // within Service.Name. - ServiceName string - - // Tags is a list of unique tags found for this service. The list is - // de-duplicated automatically by Nomad. - Tags []string -} - -// Services is used to query the service endpoints. -type Services struct { - client *Client -} - -// Services returns a new handle on the services endpoints. -func (c *Client) Services() *Services { - return &Services{client: c} -} - -// List can be used to list all service registrations currently stored within -// the target namespace. It returns a stub response object. -func (s *Services) List(q *QueryOptions) ([]*ServiceRegistrationListStub, *QueryMeta, error) { - var resp []*ServiceRegistrationListStub - qm, err := s.client.query("/v1/services", &resp, q) - if err != nil { - return nil, qm, err - } - return resp, qm, nil -} - -// Get is used to return a list of service registrations whose name matches the -// specified parameter. -func (s *Services) Get(serviceName string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) { - var resp []*ServiceRegistration - qm, err := s.client.query("/v1/service/"+url.PathEscape(serviceName), &resp, q) - if err != nil { - return nil, qm, err - } - return resp, qm, nil -} - -// Delete can be used to delete an individual service registration as defined -// by its service name and service ID. -func (s *Services) Delete(serviceName, serviceID string, q *WriteOptions) (*WriteMeta, error) { - path := fmt.Sprintf("/v1/service/%s/%s", url.PathEscape(serviceName), url.PathEscape(serviceID)) - wm, err := s.client.delete(path, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// CheckRestart describes if and when a task should be restarted based on -// failing health checks. -type CheckRestart struct { - Limit int `mapstructure:"limit" hcl:"limit,optional"` - Grace *time.Duration `mapstructure:"grace" hcl:"grace,optional"` - IgnoreWarnings bool `mapstructure:"ignore_warnings" hcl:"ignore_warnings,optional"` -} - -// Canonicalize CheckRestart fields if not nil. -func (c *CheckRestart) Canonicalize() { - if c == nil { - return - } - - if c.Grace == nil { - c.Grace = pointerOf(1 * time.Second) - } -} - -// Copy returns a copy of CheckRestart or nil if unset. 
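For the native service-discovery types in this hunk, listing is namespace-scoped and lookups are by service name. A usage sketch under the same client assumptions (illustrative only):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api" // assumed import path
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	services := client.Services()

	// List returns stubs grouped by namespace.
	stubs, _, err := services.List(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ns := range stubs {
		for _, svc := range ns.Services {
			// Get resolves every registration (one per allocation) for a name.
			regs, _, err := services.Get(svc.ServiceName, nil)
			if err != nil {
				log.Fatal(err)
			}
			for _, reg := range regs {
				fmt.Printf("%s/%s -> %s:%d (alloc %s)\n",
					ns.Namespace, reg.ServiceName, reg.Address, reg.Port, reg.AllocID)
			}
		}
	}
}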
-func (c *CheckRestart) Copy() *CheckRestart { - if c == nil { - return nil - } - - nc := new(CheckRestart) - nc.Limit = c.Limit - if c.Grace != nil { - g := *c.Grace - nc.Grace = &g - } - nc.IgnoreWarnings = c.IgnoreWarnings - return nc -} - -// Merge values from other CheckRestart over default values on this -// CheckRestart and return merged copy. -func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart { - if c == nil { - // Just return other - return o - } - - nc := c.Copy() - - if o == nil { - // Nothing to merge - return nc - } - - if o.Limit > 0 { - nc.Limit = o.Limit - } - - if o.Grace != nil { - nc.Grace = o.Grace - } - - if o.IgnoreWarnings { - nc.IgnoreWarnings = o.IgnoreWarnings - } - - return nc -} - -// ServiceCheck represents a Nomad job-submitters view of a Consul service health check. -type ServiceCheck struct { - Name string `hcl:"name,optional"` - Type string `hcl:"type,optional"` - Command string `hcl:"command,optional"` - Args []string `hcl:"args,optional"` - Path string `hcl:"path,optional"` - Protocol string `hcl:"protocol,optional"` - PortLabel string `mapstructure:"port" hcl:"port,optional"` - Expose bool `hcl:"expose,optional"` - AddressMode string `mapstructure:"address_mode" hcl:"address_mode,optional"` - Advertise string `hcl:"advertise,optional"` - Interval time.Duration `hcl:"interval,optional"` - Timeout time.Duration `hcl:"timeout,optional"` - InitialStatus string `mapstructure:"initial_status" hcl:"initial_status,optional"` - TLSSkipVerify bool `mapstructure:"tls_skip_verify" hcl:"tls_skip_verify,optional"` - Header map[string][]string `hcl:"header,block"` - Method string `hcl:"method,optional"` - CheckRestart *CheckRestart `mapstructure:"check_restart" hcl:"check_restart,block"` - GRPCService string `mapstructure:"grpc_service" hcl:"grpc_service,optional"` - GRPCUseTLS bool `mapstructure:"grpc_use_tls" hcl:"grpc_use_tls,optional"` - TaskName string `mapstructure:"task" hcl:"task,optional"` - SuccessBeforePassing int `mapstructure:"success_before_passing" hcl:"success_before_passing,optional"` - FailuresBeforeCritical int `mapstructure:"failures_before_critical" hcl:"failures_before_critical,optional"` - Body string `hcl:"body,optional"` - OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"` -} - -// Service represents a Nomad job-submitters view of a Consul or Nomad service. -type Service struct { - Name string `hcl:"name,optional"` - Tags []string `hcl:"tags,optional"` - CanaryTags []string `mapstructure:"canary_tags" hcl:"canary_tags,optional"` - EnableTagOverride bool `mapstructure:"enable_tag_override" hcl:"enable_tag_override,optional"` - PortLabel string `mapstructure:"port" hcl:"port,optional"` - AddressMode string `mapstructure:"address_mode" hcl:"address_mode,optional"` - Address string `hcl:"address,optional"` - Checks []ServiceCheck `hcl:"check,block"` - CheckRestart *CheckRestart `mapstructure:"check_restart" hcl:"check_restart,block"` - Connect *ConsulConnect `hcl:"connect,block"` - Meta map[string]string `hcl:"meta,block"` - CanaryMeta map[string]string `hcl:"canary_meta,block"` - TaggedAddresses map[string]string `hcl:"tagged_addresses,block"` - TaskName string `mapstructure:"task" hcl:"task,optional"` - OnUpdate string `mapstructure:"on_update" hcl:"on_update,optional"` - - // Provider defines which backend system provides the service registration, - // either "consul" (default) or "nomad". 
- Provider string `hcl:"provider,optional"` -} - -const ( - OnUpdateRequireHealthy = "require_healthy" - OnUpdateIgnoreWarn = "ignore_warnings" - OnUpdateIgnore = "ignore" - - // ServiceProviderConsul is the default provider for services when no - // parameter is set. - ServiceProviderConsul = "consul" -) - -// Canonicalize the Service by ensuring its name and address mode are set. Task -// will be nil for group services. -func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) { - if s.Name == "" { - if t != nil { - s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name) - } else { - s.Name = fmt.Sprintf("%s-%s", *job.Name, *tg.Name) - } - } - - // Default to AddressModeAuto - if s.AddressMode == "" { - s.AddressMode = "auto" - } - - // Default to OnUpdateRequireHealthy - if s.OnUpdate == "" { - s.OnUpdate = OnUpdateRequireHealthy - } - - // Default the service provider. - if s.Provider == "" { - s.Provider = ServiceProviderConsul - } - - if len(s.Meta) == 0 { - s.Meta = nil - } - - if len(s.CanaryMeta) == 0 { - s.CanaryMeta = nil - } - - if len(s.TaggedAddresses) == 0 { - s.TaggedAddresses = nil - } - - s.Connect.Canonicalize() - - // Canonicalize CheckRestart on Checks and merge Service.CheckRestart - // into each check. - for i, check := range s.Checks { - s.Checks[i].CheckRestart = s.CheckRestart.Merge(check.CheckRestart) - s.Checks[i].CheckRestart.Canonicalize() - - if s.Checks[i].SuccessBeforePassing < 0 { - s.Checks[i].SuccessBeforePassing = 0 - } - - if s.Checks[i].FailuresBeforeCritical < 0 { - s.Checks[i].FailuresBeforeCritical = 0 - } - - // Inhert Service - if s.Checks[i].OnUpdate == "" { - s.Checks[i].OnUpdate = s.OnUpdate - } - } -} diff --git a/vendor/github.com/hashicorp/nomad/api/status.go b/vendor/github.com/hashicorp/nomad/api/status.go deleted file mode 100644 index da1cb4c0..00000000 --- a/vendor/github.com/hashicorp/nomad/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status is used to query the status-related endpoints. -type Status struct { - client *Client -} - -// Status returns a handle on the status endpoints. -func (c *Client) Status() *Status { - return &Status{client: c} -} - -// Leader is used to query for the current cluster leader. -func (s *Status) Leader() (string, error) { - var resp string - _, err := s.client.query("/v1/status/leader", &resp, nil) - if err != nil { - return "", err - } - return resp, nil -} - -// RegionLeader is used to query for the leader in the passed region. -func (s *Status) RegionLeader(region string) (string, error) { - var resp string - q := QueryOptions{Region: region} - _, err := s.client.query("/v1/status/leader", &resp, &q) - if err != nil { - return "", err - } - return resp, nil -} - -// Peers is used to query the addresses of the server peers -// in the cluster. -func (s *Status) Peers() ([]string, error) { - var resp []string - _, err := s.client.query("/v1/status/peers", &resp, nil) - if err != nil { - return nil, err - } - return resp, nil -} diff --git a/vendor/github.com/hashicorp/nomad/api/system.go b/vendor/github.com/hashicorp/nomad/api/system.go deleted file mode 100644 index 3717b9ae..00000000 --- a/vendor/github.com/hashicorp/nomad/api/system.go +++ /dev/null @@ -1,23 +0,0 @@ -package api - -// Status is used to query the status-related endpoints. -type System struct { - client *Client -} - -// System returns a handle on the system endpoints. 
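// Illustrative usage sketch (not part of the deleted file): querying the
// status endpoints shown above. Client construction mirrors the earlier
// sketch; the "eu-west-1" region name is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Current Raft leader of the local region.
	leader, err := client.Status().Leader()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("leader:", leader)

	// Leader of a specific region, plus the full set of server peers.
	euLeader, err := client.Status().RegionLeader("eu-west-1")
	if err != nil {
		log.Fatal(err)
	}
	peers, err := client.Status().Peers()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(euLeader, peers)

	// The System handle defined just below exposes cluster maintenance calls.
	if err := client.System().GarbageCollect(); err != nil {
		log.Fatal(err)
	}
}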
-func (c *Client) System() *System { - return &System{client: c} -} - -func (s *System) GarbageCollect() error { - var req struct{} - _, err := s.client.write("/v1/system/gc", &req, nil, nil) - return err -} - -func (s *System) ReconcileSummaries() error { - var req struct{} - _, err := s.client.write("/v1/system/reconcile/summaries", &req, nil, nil) - return err -} diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go deleted file mode 100644 index c8a1c0b3..00000000 --- a/vendor/github.com/hashicorp/nomad/api/tasks.go +++ /dev/null @@ -1,1109 +0,0 @@ -package api - -import ( - "fmt" - "path" - "path/filepath" - "strings" - "time" -) - -const ( - // RestartPolicyModeDelay causes an artificial delay till the next interval is - // reached when the specified attempts have been reached in the interval. - RestartPolicyModeDelay = "delay" - - // RestartPolicyModeFail causes a job to fail if the specified number of - // attempts are reached within an interval. - RestartPolicyModeFail = "fail" -) - -// MemoryStats holds memory usage related stats -type MemoryStats struct { - RSS uint64 - Cache uint64 - Swap uint64 - Usage uint64 - MaxUsage uint64 - KernelUsage uint64 - KernelMaxUsage uint64 - Measured []string -} - -// CpuStats holds cpu usage related stats -type CpuStats struct { - SystemMode float64 - UserMode float64 - TotalTicks float64 - ThrottledPeriods uint64 - ThrottledTime uint64 - Percent float64 - Measured []string -} - -// ResourceUsage holds information related to cpu and memory stats -type ResourceUsage struct { - MemoryStats *MemoryStats - CpuStats *CpuStats - DeviceStats []*DeviceGroupStats -} - -// TaskResourceUsage holds aggregated resource usage of all processes in a Task -// and the resource usage of the individual pids -type TaskResourceUsage struct { - ResourceUsage *ResourceUsage - Timestamp int64 - Pids map[string]*ResourceUsage -} - -// AllocResourceUsage holds the aggregated task resource usage of the -// allocation. -type AllocResourceUsage struct { - ResourceUsage *ResourceUsage - Tasks map[string]*TaskResourceUsage - Timestamp int64 -} - -// AllocCheckStatus contains the current status of a nomad service discovery check. -type AllocCheckStatus struct { - ID string - Check string - Group string - Mode string - Output string - Service string - Task string - Status string - StatusCode int - Timestamp int64 -} - -// AllocCheckStatuses holds the set of nomad service discovery checks within -// the allocation (including group and task level service checks). -type AllocCheckStatuses map[string]AllocCheckStatus - -// RestartPolicy defines how the Nomad client restarts -// tasks in a taskgroup when they fail -type RestartPolicy struct { - Interval *time.Duration `hcl:"interval,optional"` - Attempts *int `hcl:"attempts,optional"` - Delay *time.Duration `hcl:"delay,optional"` - Mode *string `hcl:"mode,optional"` -} - -func (r *RestartPolicy) Merge(rp *RestartPolicy) { - if rp.Interval != nil { - r.Interval = rp.Interval - } - if rp.Attempts != nil { - r.Attempts = rp.Attempts - } - if rp.Delay != nil { - r.Delay = rp.Delay - } - if rp.Mode != nil { - r.Mode = rp.Mode - } -} - -// Reschedule configures how Tasks are rescheduled when they crash or fail. -type ReschedulePolicy struct { - // Attempts limits the number of rescheduling attempts that can occur in an interval. - Attempts *int `mapstructure:"attempts" hcl:"attempts,optional"` - - // Interval is a duration in which we can limit the number of reschedule attempts. 
- Interval *time.Duration `mapstructure:"interval" hcl:"interval,optional"` - - // Delay is a minimum duration to wait between reschedule attempts. - // The delay function determines how much subsequent reschedule attempts are delayed by. - Delay *time.Duration `mapstructure:"delay" hcl:"delay,optional"` - - // DelayFunction determines how the delay progressively changes on subsequent reschedule - // attempts. Valid values are "exponential", "constant", and "fibonacci". - DelayFunction *string `mapstructure:"delay_function" hcl:"delay_function,optional"` - - // MaxDelay is an upper bound on the delay. - MaxDelay *time.Duration `mapstructure:"max_delay" hcl:"max_delay,optional"` - - // Unlimited allows rescheduling attempts until they succeed - Unlimited *bool `mapstructure:"unlimited" hcl:"unlimited,optional"` -} - -func (r *ReschedulePolicy) Merge(rp *ReschedulePolicy) { - if rp == nil { - return - } - if rp.Interval != nil { - r.Interval = rp.Interval - } - if rp.Attempts != nil { - r.Attempts = rp.Attempts - } - if rp.Delay != nil { - r.Delay = rp.Delay - } - if rp.DelayFunction != nil { - r.DelayFunction = rp.DelayFunction - } - if rp.MaxDelay != nil { - r.MaxDelay = rp.MaxDelay - } - if rp.Unlimited != nil { - r.Unlimited = rp.Unlimited - } -} - -func (r *ReschedulePolicy) Canonicalize(jobType string) { - dp := NewDefaultReschedulePolicy(jobType) - if r.Interval == nil { - r.Interval = dp.Interval - } - if r.Attempts == nil { - r.Attempts = dp.Attempts - } - if r.Delay == nil { - r.Delay = dp.Delay - } - if r.DelayFunction == nil { - r.DelayFunction = dp.DelayFunction - } - if r.MaxDelay == nil { - r.MaxDelay = dp.MaxDelay - } - if r.Unlimited == nil { - r.Unlimited = dp.Unlimited - } -} - -// Affinity is used to serialize task group affinities -type Affinity struct { - LTarget string `hcl:"attribute,optional"` // Left-hand target - RTarget string `hcl:"value,optional"` // Right-hand target - Operand string `hcl:"operator,optional"` // Constraint operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any - Weight *int8 `hcl:"weight,optional"` // Weight applied to nodes that match the affinity. 
Can be negative -} - -func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *Affinity { - return &Affinity{ - LTarget: lTarget, - RTarget: rTarget, - Operand: operand, - Weight: pointerOf(int8(weight)), - } -} - -func (a *Affinity) Canonicalize() { - if a.Weight == nil { - a.Weight = pointerOf(int8(50)) - } -} - -func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy { - var dp *ReschedulePolicy - switch jobType { - case "service": - // This needs to be in sync with DefaultServiceJobReschedulePolicy - // in nomad/structs/structs.go - dp = &ReschedulePolicy{ - Delay: pointerOf(30 * time.Second), - DelayFunction: pointerOf("exponential"), - MaxDelay: pointerOf(1 * time.Hour), - Unlimited: pointerOf(true), - - Attempts: pointerOf(0), - Interval: pointerOf(time.Duration(0)), - } - case "batch": - // This needs to be in sync with DefaultBatchJobReschedulePolicy - // in nomad/structs/structs.go - dp = &ReschedulePolicy{ - Attempts: pointerOf(1), - Interval: pointerOf(24 * time.Hour), - Delay: pointerOf(5 * time.Second), - DelayFunction: pointerOf("constant"), - - MaxDelay: pointerOf(time.Duration(0)), - Unlimited: pointerOf(false), - } - - case "system": - dp = &ReschedulePolicy{ - Attempts: pointerOf(0), - Interval: pointerOf(time.Duration(0)), - Delay: pointerOf(time.Duration(0)), - DelayFunction: pointerOf(""), - MaxDelay: pointerOf(time.Duration(0)), - Unlimited: pointerOf(false), - } - - default: - // GH-7203: it is possible an unknown job type is passed to this - // function and we need to ensure a non-nil object is returned so that - // the canonicalization runs without panicking. - dp = &ReschedulePolicy{ - Attempts: pointerOf(0), - Interval: pointerOf(time.Duration(0)), - Delay: pointerOf(time.Duration(0)), - DelayFunction: pointerOf(""), - MaxDelay: pointerOf(time.Duration(0)), - Unlimited: pointerOf(false), - } - } - return dp -} - -func (r *ReschedulePolicy) Copy() *ReschedulePolicy { - if r == nil { - return nil - } - nrp := new(ReschedulePolicy) - *nrp = *r - return nrp -} - -func (p *ReschedulePolicy) String() string { - if p == nil { - return "" - } - if *p.Unlimited { - return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay) - } - return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay) -} - -// Spread is used to serialize task group allocation spread preferences -type Spread struct { - Attribute string `hcl:"attribute,optional"` - Weight *int8 `hcl:"weight,optional"` - SpreadTarget []*SpreadTarget `hcl:"target,block"` -} - -// SpreadTarget is used to serialize target allocation spread percentages -type SpreadTarget struct { - Value string `hcl:",label"` - Percent uint8 `hcl:"percent,optional"` -} - -func NewSpreadTarget(value string, percent uint8) *SpreadTarget { - return &SpreadTarget{ - Value: value, - Percent: percent, - } -} - -func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread { - return &Spread{ - Attribute: attribute, - Weight: pointerOf(int8(weight)), - SpreadTarget: spreadTargets, - } -} - -func (s *Spread) Canonicalize() { - if s.Weight == nil { - s.Weight = pointerOf(int8(50)) - } -} - -// EphemeralDisk is an ephemeral disk object -type EphemeralDisk struct { - Sticky *bool `hcl:"sticky,optional"` - Migrate *bool `hcl:"migrate,optional"` - SizeMB *int `mapstructure:"size" hcl:"size,optional"` -} - -func DefaultEphemeralDisk() *EphemeralDisk { - return &EphemeralDisk{ - Sticky: pointerOf(false), - 
Migrate: pointerOf(false), - SizeMB: pointerOf(300), - } -} - -func (e *EphemeralDisk) Canonicalize() { - if e.Sticky == nil { - e.Sticky = pointerOf(false) - } - if e.Migrate == nil { - e.Migrate = pointerOf(false) - } - if e.SizeMB == nil { - e.SizeMB = pointerOf(300) - } -} - -// MigrateStrategy describes how allocations for a task group should be -// migrated between nodes (eg when draining). -type MigrateStrategy struct { - MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"` - HealthCheck *string `mapstructure:"health_check" hcl:"health_check,optional"` - MinHealthyTime *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"` - HealthyDeadline *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"` -} - -func DefaultMigrateStrategy() *MigrateStrategy { - return &MigrateStrategy{ - MaxParallel: pointerOf(1), - HealthCheck: pointerOf("checks"), - MinHealthyTime: pointerOf(10 * time.Second), - HealthyDeadline: pointerOf(5 * time.Minute), - } -} - -func (m *MigrateStrategy) Canonicalize() { - if m == nil { - return - } - defaults := DefaultMigrateStrategy() - if m.MaxParallel == nil { - m.MaxParallel = defaults.MaxParallel - } - if m.HealthCheck == nil { - m.HealthCheck = defaults.HealthCheck - } - if m.MinHealthyTime == nil { - m.MinHealthyTime = defaults.MinHealthyTime - } - if m.HealthyDeadline == nil { - m.HealthyDeadline = defaults.HealthyDeadline - } -} - -func (m *MigrateStrategy) Merge(o *MigrateStrategy) { - if o.MaxParallel != nil { - m.MaxParallel = o.MaxParallel - } - if o.HealthCheck != nil { - m.HealthCheck = o.HealthCheck - } - if o.MinHealthyTime != nil { - m.MinHealthyTime = o.MinHealthyTime - } - if o.HealthyDeadline != nil { - m.HealthyDeadline = o.HealthyDeadline - } -} - -func (m *MigrateStrategy) Copy() *MigrateStrategy { - if m == nil { - return nil - } - nm := new(MigrateStrategy) - *nm = *m - return nm -} - -// VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. -type VolumeRequest struct { - Name string `hcl:"name,label"` - Type string `hcl:"type,optional"` - Source string `hcl:"source,optional"` - ReadOnly bool `hcl:"read_only,optional"` - AccessMode string `hcl:"access_mode,optional"` - AttachmentMode string `hcl:"attachment_mode,optional"` - MountOptions *CSIMountOptions `hcl:"mount_options,block"` - PerAlloc bool `hcl:"per_alloc,optional"` - ExtraKeysHCL []string `hcl1:",unusedKeys,optional" json:"-"` -} - -const ( - VolumeMountPropagationPrivate = "private" - VolumeMountPropagationHostToTask = "host-to-task" - VolumeMountPropagationBidirectional = "bidirectional" -) - -// VolumeMount represents the relationship between a destination path in a task -// and the task group volume that should be mounted there. -type VolumeMount struct { - Volume *string `hcl:"volume,optional"` - Destination *string `hcl:"destination,optional"` - ReadOnly *bool `mapstructure:"read_only" hcl:"read_only,optional"` - PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"` -} - -func (vm *VolumeMount) Canonicalize() { - if vm.PropagationMode == nil { - vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate) - } - if vm.ReadOnly == nil { - vm.ReadOnly = pointerOf(false) - } -} - -// TaskGroup is the unit of scheduling. 
-type TaskGroup struct { - Name *string `hcl:"name,label"` - Count *int `hcl:"count,optional"` - Constraints []*Constraint `hcl:"constraint,block"` - Affinities []*Affinity `hcl:"affinity,block"` - Tasks []*Task `hcl:"task,block"` - Spreads []*Spread `hcl:"spread,block"` - Volumes map[string]*VolumeRequest `hcl:"volume,block"` - RestartPolicy *RestartPolicy `hcl:"restart,block"` - ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"` - EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"` - Update *UpdateStrategy `hcl:"update,block"` - Migrate *MigrateStrategy `hcl:"migrate,block"` - Networks []*NetworkResource `hcl:"network,block"` - Meta map[string]string `hcl:"meta,block"` - Services []*Service `hcl:"service,block"` - ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` - StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"` - MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"` - Scaling *ScalingPolicy `hcl:"scaling,block"` - Consul *Consul `hcl:"consul,block"` -} - -// NewTaskGroup creates a new TaskGroup. -func NewTaskGroup(name string, count int) *TaskGroup { - return &TaskGroup{ - Name: pointerOf(name), - Count: pointerOf(count), - } -} - -// Canonicalize sets defaults and merges settings that should be inherited from the job -func (g *TaskGroup) Canonicalize(job *Job) { - if g.Name == nil { - g.Name = pointerOf("") - } - - if g.Count == nil { - if g.Scaling != nil && g.Scaling.Min != nil { - g.Count = pointerOf(int(*g.Scaling.Min)) - } else { - g.Count = pointerOf(1) - } - } - if g.Scaling != nil { - g.Scaling.Canonicalize(*g.Count) - } - if g.EphemeralDisk == nil { - g.EphemeralDisk = DefaultEphemeralDisk() - } else { - g.EphemeralDisk.Canonicalize() - } - - // Merge job.consul onto group.consul - if g.Consul == nil { - g.Consul = new(Consul) - } - g.Consul.MergeNamespace(job.ConsulNamespace) - g.Consul.Canonicalize() - - // Merge the update policy from the job - if ju, tu := job.Update != nil, g.Update != nil; ju && tu { - // Merge the jobs and task groups definition of the update strategy - jc := job.Update.Copy() - jc.Merge(g.Update) - g.Update = jc - } else if ju && !job.Update.Empty() { - // Inherit the jobs as long as it is non-empty. 
- jc := job.Update.Copy() - g.Update = jc - } - - if g.Update != nil { - g.Update.Canonicalize() - } - - // Merge the reschedule policy from the job - if jr, tr := job.Reschedule != nil, g.ReschedulePolicy != nil; jr && tr { - jobReschedule := job.Reschedule.Copy() - jobReschedule.Merge(g.ReschedulePolicy) - g.ReschedulePolicy = jobReschedule - } else if jr { - jobReschedule := job.Reschedule.Copy() - g.ReschedulePolicy = jobReschedule - } - // Only use default reschedule policy for non system jobs - if g.ReschedulePolicy == nil && *job.Type != "system" { - g.ReschedulePolicy = NewDefaultReschedulePolicy(*job.Type) - } - if g.ReschedulePolicy != nil { - g.ReschedulePolicy.Canonicalize(*job.Type) - } - // Merge the migrate strategy from the job - if jm, tm := job.Migrate != nil, g.Migrate != nil; jm && tm { - jobMigrate := job.Migrate.Copy() - jobMigrate.Merge(g.Migrate) - g.Migrate = jobMigrate - } else if jm { - jobMigrate := job.Migrate.Copy() - g.Migrate = jobMigrate - } - - // Merge with default reschedule policy - if g.Migrate == nil && *job.Type == "service" { - g.Migrate = &MigrateStrategy{} - } - if g.Migrate != nil { - g.Migrate.Canonicalize() - } - - var defaultRestartPolicy *RestartPolicy - switch *job.Type { - case "service", "system": - defaultRestartPolicy = defaultServiceJobRestartPolicy() - default: - defaultRestartPolicy = defaultBatchJobRestartPolicy() - } - - if g.RestartPolicy != nil { - defaultRestartPolicy.Merge(g.RestartPolicy) - } - g.RestartPolicy = defaultRestartPolicy - - for _, t := range g.Tasks { - t.Canonicalize(g, job) - } - - for _, spread := range g.Spreads { - spread.Canonicalize() - } - for _, a := range g.Affinities { - a.Canonicalize() - } - for _, n := range g.Networks { - n.Canonicalize() - } - for _, s := range g.Services { - s.Canonicalize(nil, g, job) - } -} - -// These needs to be in sync with DefaultServiceJobRestartPolicy in -// in nomad/structs/structs.go -func defaultServiceJobRestartPolicy() *RestartPolicy { - return &RestartPolicy{ - Delay: pointerOf(15 * time.Second), - Attempts: pointerOf(2), - Interval: pointerOf(30 * time.Minute), - Mode: pointerOf(RestartPolicyModeFail), - } -} - -// These needs to be in sync with DefaultBatchJobRestartPolicy in -// in nomad/structs/structs.go -func defaultBatchJobRestartPolicy() *RestartPolicy { - return &RestartPolicy{ - Delay: pointerOf(15 * time.Second), - Attempts: pointerOf(3), - Interval: pointerOf(24 * time.Hour), - Mode: pointerOf(RestartPolicyModeFail), - } -} - -// Constrain is used to add a constraint to a task group. -func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup { - g.Constraints = append(g.Constraints, c) - return g -} - -// AddMeta is used to add a meta k/v pair to a task group -func (g *TaskGroup) SetMeta(key, val string) *TaskGroup { - if g.Meta == nil { - g.Meta = make(map[string]string) - } - g.Meta[key] = val - return g -} - -// AddTask is used to add a new task to a task group. -func (g *TaskGroup) AddTask(t *Task) *TaskGroup { - g.Tasks = append(g.Tasks, t) - return g -} - -// AddAffinity is used to add a new affinity to a task group. -func (g *TaskGroup) AddAffinity(a *Affinity) *TaskGroup { - g.Affinities = append(g.Affinities, a) - return g -} - -// RequireDisk adds a ephemeral disk to the task group -func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup { - g.EphemeralDisk = disk - return g -} - -// AddSpread is used to add a new spread preference to a task group. 
-func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup { - g.Spreads = append(g.Spreads, s) - return g -} - -// LogConfig provides configuration for log rotation -type LogConfig struct { - MaxFiles *int `mapstructure:"max_files" hcl:"max_files,optional"` - MaxFileSizeMB *int `mapstructure:"max_file_size" hcl:"max_file_size,optional"` -} - -func DefaultLogConfig() *LogConfig { - return &LogConfig{ - MaxFiles: pointerOf(10), - MaxFileSizeMB: pointerOf(10), - } -} - -func (l *LogConfig) Canonicalize() { - if l.MaxFiles == nil { - l.MaxFiles = pointerOf(10) - } - if l.MaxFileSizeMB == nil { - l.MaxFileSizeMB = pointerOf(10) - } -} - -// DispatchPayloadConfig configures how a task gets its input from a job dispatch -type DispatchPayloadConfig struct { - File string `hcl:"file,optional"` -} - -const ( - TaskLifecycleHookPrestart = "prestart" - TaskLifecycleHookPoststart = "poststart" - TaskLifecycleHookPoststop = "poststop" -) - -type TaskLifecycle struct { - Hook string `mapstructure:"hook" hcl:"hook,optional"` - Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"` -} - -// Determine if lifecycle has user-input values -func (l *TaskLifecycle) Empty() bool { - return l == nil || (l.Hook == "") -} - -// Task is a single process in a task group. -type Task struct { - Name string `hcl:"name,label"` - Driver string `hcl:"driver,optional"` - User string `hcl:"user,optional"` - Lifecycle *TaskLifecycle `hcl:"lifecycle,block"` - Config map[string]interface{} `hcl:"config,block"` - Constraints []*Constraint `hcl:"constraint,block"` - Affinities []*Affinity `hcl:"affinity,block"` - Env map[string]string `hcl:"env,block"` - Services []*Service `hcl:"service,block"` - Resources *Resources `hcl:"resources,block"` - RestartPolicy *RestartPolicy `hcl:"restart,block"` - Meta map[string]string `hcl:"meta,block"` - KillTimeout *time.Duration `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"` - LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"` - Artifacts []*TaskArtifact `hcl:"artifact,block"` - Vault *Vault `hcl:"vault,block"` - Templates []*Template `hcl:"template,block"` - DispatchPayload *DispatchPayloadConfig `hcl:"dispatch_payload,block"` - VolumeMounts []*VolumeMount `hcl:"volume_mount,block"` - CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:",omitempty" hcl:"csi_plugin,block"` - Leader bool `hcl:"leader,optional"` - ShutdownDelay time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` - KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"` - Kind string `hcl:"kind,optional"` - ScalingPolicies []*ScalingPolicy `hcl:"scaling,block"` -} - -func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { - if t.Resources == nil { - t.Resources = &Resources{} - } - t.Resources.Canonicalize() - - if t.KillTimeout == nil { - t.KillTimeout = pointerOf(5 * time.Second) - } - if t.LogConfig == nil { - t.LogConfig = DefaultLogConfig() - } else { - t.LogConfig.Canonicalize() - } - for _, artifact := range t.Artifacts { - artifact.Canonicalize() - } - if t.Vault != nil { - t.Vault.Canonicalize() - } - for _, tmpl := range t.Templates { - tmpl.Canonicalize() - } - for _, s := range t.Services { - s.Canonicalize(t, tg, job) - } - for _, a := range t.Affinities { - a.Canonicalize() - } - for _, vm := range t.VolumeMounts { - vm.Canonicalize() - } - if t.Lifecycle.Empty() { - t.Lifecycle = nil - } - if t.CSIPluginConfig != nil { - t.CSIPluginConfig.Canonicalize() - } - if t.RestartPolicy == nil { - t.RestartPolicy = tg.RestartPolicy - } else { - 
tgrp := &RestartPolicy{} - *tgrp = *tg.RestartPolicy - tgrp.Merge(t.RestartPolicy) - t.RestartPolicy = tgrp - } -} - -// TaskArtifact is used to download artifacts before running a task. -type TaskArtifact struct { - GetterSource *string `mapstructure:"source" hcl:"source,optional"` - GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"` - GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"` - GetterMode *string `mapstructure:"mode" hcl:"mode,optional"` - RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"` -} - -func (a *TaskArtifact) Canonicalize() { - if a.GetterMode == nil { - a.GetterMode = pointerOf("any") - } - if a.GetterSource == nil { - // Shouldn't be possible, but we don't want to panic - a.GetterSource = pointerOf("") - } - if len(a.GetterOptions) == 0 { - a.GetterOptions = nil - } - if len(a.GetterHeaders) == 0 { - a.GetterHeaders = nil - } - if a.RelativeDest == nil { - switch *a.GetterMode { - case "file": - // File mode should default to local/filename - dest := *a.GetterSource - dest = path.Base(dest) - dest = filepath.Join("local", dest) - a.RelativeDest = &dest - default: - // Default to a directory - a.RelativeDest = pointerOf("local/") - } - } -} - -// WaitConfig is the Min/Max duration to wait for the Consul cluster to reach a -// consistent state before attempting to render Templates. -type WaitConfig struct { - Min *time.Duration `mapstructure:"min" hcl:"min"` - Max *time.Duration `mapstructure:"max" hcl:"max"` -} - -func (wc *WaitConfig) Copy() *WaitConfig { - if wc == nil { - return nil - } - - nwc := new(WaitConfig) - *nwc = *wc - - return nwc -} - -type ChangeScript struct { - Command *string `mapstructure:"command" hcl:"command"` - Args []string `mapstructure:"args" hcl:"args,optional"` - Timeout *time.Duration `mapstructure:"timeout" hcl:"timeout,optional"` - FailOnError *bool `mapstructure:"fail_on_error" hcl:"fail_on_error"` -} - -func (ch *ChangeScript) Canonicalize() { - if ch.Command == nil { - ch.Command = pointerOf("") - } - if ch.Args == nil { - ch.Args = []string{} - } - if ch.Timeout == nil { - ch.Timeout = pointerOf(5 * time.Second) - } - if ch.FailOnError == nil { - ch.FailOnError = pointerOf(false) - } -} - -type Template struct { - SourcePath *string `mapstructure:"source" hcl:"source,optional"` - DestPath *string `mapstructure:"destination" hcl:"destination,optional"` - EmbeddedTmpl *string `mapstructure:"data" hcl:"data,optional"` - ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` - ChangeScript *ChangeScript `mapstructure:"change_script" hcl:"change_script,block"` - ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` - Splay *time.Duration `mapstructure:"splay" hcl:"splay,optional"` - Perms *string `mapstructure:"perms" hcl:"perms,optional"` - Uid *int `mapstructure:"uid" hcl:"uid,optional"` - Gid *int `mapstructure:"gid" hcl:"gid,optional"` - LeftDelim *string `mapstructure:"left_delimiter" hcl:"left_delimiter,optional"` - RightDelim *string `mapstructure:"right_delimiter" hcl:"right_delimiter,optional"` - Envvars *bool `mapstructure:"env" hcl:"env,optional"` - VaultGrace *time.Duration `mapstructure:"vault_grace" hcl:"vault_grace,optional"` - Wait *WaitConfig `mapstructure:"wait" hcl:"wait,block"` -} - -func (tmpl *Template) Canonicalize() { - if tmpl.SourcePath == nil { - tmpl.SourcePath = pointerOf("") - } - if tmpl.DestPath == nil { - tmpl.DestPath = pointerOf("") - } - if tmpl.EmbeddedTmpl == nil { - 
tmpl.EmbeddedTmpl = pointerOf("") - } - if tmpl.ChangeMode == nil { - tmpl.ChangeMode = pointerOf("restart") - } - if tmpl.ChangeSignal == nil { - if *tmpl.ChangeMode == "signal" { - tmpl.ChangeSignal = pointerOf("SIGHUP") - } else { - tmpl.ChangeSignal = pointerOf("") - } - } else { - sig := *tmpl.ChangeSignal - tmpl.ChangeSignal = pointerOf(strings.ToUpper(sig)) - } - if tmpl.ChangeScript != nil { - tmpl.ChangeScript.Canonicalize() - } - if tmpl.Splay == nil { - tmpl.Splay = pointerOf(5 * time.Second) - } - if tmpl.Perms == nil { - tmpl.Perms = pointerOf("0644") - } - if tmpl.LeftDelim == nil { - tmpl.LeftDelim = pointerOf("{{") - } - if tmpl.RightDelim == nil { - tmpl.RightDelim = pointerOf("}}") - } - if tmpl.Envvars == nil { - tmpl.Envvars = pointerOf(false) - } - - //COMPAT(0.12) VaultGrace is deprecated and unused as of Vault 0.5 - if tmpl.VaultGrace == nil { - tmpl.VaultGrace = pointerOf(time.Duration(0)) - } -} - -type Vault struct { - Policies []string `hcl:"policies,optional"` - Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"` - Env *bool `hcl:"env,optional"` - ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` - ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` -} - -func (v *Vault) Canonicalize() { - if v.Env == nil { - v.Env = pointerOf(true) - } - if v.Namespace == nil { - v.Namespace = pointerOf("") - } - if v.ChangeMode == nil { - v.ChangeMode = pointerOf("restart") - } - if v.ChangeSignal == nil { - v.ChangeSignal = pointerOf("SIGHUP") - } -} - -// NewTask creates and initializes a new Task. -func NewTask(name, driver string) *Task { - return &Task{ - Name: name, - Driver: driver, - } -} - -// Configure is used to configure a single k/v pair on -// the task. -func (t *Task) SetConfig(key string, val interface{}) *Task { - if t.Config == nil { - t.Config = make(map[string]interface{}) - } - t.Config[key] = val - return t -} - -// SetMeta is used to add metadata k/v pairs to the task. -func (t *Task) SetMeta(key, val string) *Task { - if t.Meta == nil { - t.Meta = make(map[string]string) - } - t.Meta[key] = val - return t -} - -// Require is used to add resource requirements to a task. -func (t *Task) Require(r *Resources) *Task { - t.Resources = r - return t -} - -// Constraint adds a new constraints to a single task. -func (t *Task) Constrain(c *Constraint) *Task { - t.Constraints = append(t.Constraints, c) - return t -} - -// AddAffinity adds a new affinity to a single task. -func (t *Task) AddAffinity(a *Affinity) *Task { - t.Affinities = append(t.Affinities, a) - return t -} - -// SetLogConfig sets a log config to a task -func (t *Task) SetLogConfig(l *LogConfig) *Task { - t.LogConfig = l - return t -} - -// TaskState tracks the current state of a task and events that caused state -// transitions. -type TaskState struct { - State string - Failed bool - Restarts uint64 - LastRestart time.Time - StartedAt time.Time - FinishedAt time.Time - Events []*TaskEvent - - // Experimental - TaskHandle is based on drivers.TaskHandle and used - // by remote task drivers to migrate task handles between allocations. - TaskHandle *TaskHandle -} - -// Experimental - TaskHandle is based on drivers.TaskHandle and used by remote -// task drivers to migrate task handles between allocations. 
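// Illustrative usage sketch (not part of the deleted file): composing a task
// group and task with the builder helpers shown above. NewServiceJob,
// AddTaskGroup, NewConstraint, and Jobs().Register live in other files of this
// package and are assumptions here, as are the driver config values.
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// One group of three instances, constrained to Linux nodes.
	group := api.NewTaskGroup("cache", 3).
		Constrain(api.NewConstraint("${attr.kernel.name}", "=", "linux")).
		SetMeta("tier", "cache")

	// A single Docker task; SetConfig fills the driver's opaque config map.
	task := api.NewTask("redis", "docker").
		SetConfig("image", "redis:7").
		SetMeta("owner", "platform-team")
	group.AddTask(task)

	// Attach the group to a service job and register it with the cluster.
	job := api.NewServiceJob("cache", "cache", "global", 50)
	job.AddTaskGroup(group)

	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	if _, _, err := client.Jobs().Register(job, nil); err != nil {
		log.Fatal(err)
	}
}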
-type TaskHandle struct { - Version int - DriverState []byte -} - -const ( - TaskSetup = "Task Setup" - TaskSetupFailure = "Setup Failure" - TaskDriverFailure = "Driver Failure" - TaskDriverMessage = "Driver" - TaskReceived = "Received" - TaskFailedValidation = "Failed Validation" - TaskStarted = "Started" - TaskTerminated = "Terminated" - TaskKilling = "Killing" - TaskKilled = "Killed" - TaskRestarting = "Restarting" - TaskNotRestarting = "Not Restarting" - TaskDownloadingArtifacts = "Downloading Artifacts" - TaskArtifactDownloadFailed = "Failed Artifact Download" - TaskSiblingFailed = "Sibling Task Failed" - TaskSignaling = "Signaling" - TaskRestartSignal = "Restart Signaled" - TaskLeaderDead = "Leader Task Dead" - TaskBuildingTaskDir = "Building Task Directory" - TaskClientReconnected = "Reconnected" -) - -// TaskEvent is an event that effects the state of a task and contains meta-data -// appropriate to the events type. -type TaskEvent struct { - Type string - Time int64 - DisplayMessage string - Details map[string]string - Message string - // DEPRECATION NOTICE: The following fields are all deprecated. see TaskEvent struct in structs.go for details. - FailsTask bool - RestartReason string - SetupError string - DriverError string - DriverMessage string - ExitCode int - Signal int - KillReason string - KillTimeout time.Duration - KillError string - StartDelay int64 - DownloadError string - ValidationError string - DiskLimit int64 - DiskSize int64 - FailedSibling string - VaultError string - TaskSignalReason string - TaskSignal string - GenericSource string -} - -// CSIPluginType is an enum string that encapsulates the valid options for a -// CSIPlugin stanza's Type. These modes will allow the plugin to be used in -// different ways by the client. -type CSIPluginType string - -const ( - // CSIPluginTypeNode indicates that Nomad should only use the plugin for - // performing Node RPCs against the provided plugin. - CSIPluginTypeNode CSIPluginType = "node" - - // CSIPluginTypeController indicates that Nomad should only use the plugin for - // performing Controller RPCs against the provided plugin. - CSIPluginTypeController CSIPluginType = "controller" - - // CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for - // both controller and node rpcs. - CSIPluginTypeMonolith CSIPluginType = "monolith" -) - -// TaskCSIPluginConfig contains the data that is required to setup a task as a -// CSI plugin. This will be used by the csi_plugin_supervisor_hook to configure -// mounts for the plugin and initiate the connection to the plugin catalog. -type TaskCSIPluginConfig struct { - // ID is the identifier of the plugin. - // Ideally this should be the FQDN of the plugin. - ID string `mapstructure:"id" hcl:"id,optional"` - - // CSIPluginType instructs Nomad on how to handle processing a plugin - Type CSIPluginType `mapstructure:"type" hcl:"type,optional"` - - // MountDir is the directory (within its container) in which the plugin creates a - // socket (called CSISocketName) for communication with Nomad. Default is /csi. - MountDir string `mapstructure:"mount_dir" hcl:"mount_dir,optional"` - - // StagePublishBaseDir is the base directory (within its container) in which the plugin - // mounts volumes being staged and bind mounts volumes being published. - // e.g. staging_target_path = {StagePublishBaseDir}/staging/{volume-id}/{usage-mode} - // e.g. target_path = {StagePublishBaseDir}/per-alloc/{alloc-id}/{volume-id}/{usage-mode} - // Default is /local/csi. 
- StagePublishBaseDir string `mapstructure:"stage_publish_base_dir" hcl:"stage_publish_base_dir,optional"` - - // HealthTimeout is the time after which the CSI plugin tasks will be killed - // if the CSI Plugin is not healthy. - HealthTimeout time.Duration `mapstructure:"health_timeout" hcl:"health_timeout,optional"` -} - -func (t *TaskCSIPluginConfig) Canonicalize() { - if t.MountDir == "" { - t.MountDir = "/csi" - } - - if t.StagePublishBaseDir == "" { - t.StagePublishBaseDir = filepath.Join("/local", "csi") - } - - if t.HealthTimeout == 0 { - t.HealthTimeout = 30 * time.Second - } -} diff --git a/vendor/github.com/hashicorp/nomad/api/utils.go b/vendor/github.com/hashicorp/nomad/api/utils.go deleted file mode 100644 index a8e1c02e..00000000 --- a/vendor/github.com/hashicorp/nomad/api/utils.go +++ /dev/null @@ -1,32 +0,0 @@ -package api - -import ( - "strconv" - "strings" -) - -// formatFloat converts the floating-point number f to a string, -// after rounding it to the passed unit. -// -// Uses 'f' format (-ddd.dddddd, no exponent), and uses at most -// maxPrec digits after the decimal point. -func formatFloat(f float64, maxPrec int) string { - v := strconv.FormatFloat(f, 'f', -1, 64) - - idx := strings.LastIndex(v, ".") - if idx == -1 { - return v - } - - sublen := idx + maxPrec + 1 - if sublen > len(v) { - sublen = len(v) - } - - return v[:sublen] -} - -// pointerOf returns a pointer to a. -func pointerOf[A any](a A) *A { - return &a -} diff --git a/vendor/github.com/hashicorp/nomad/api/variables.go b/vendor/github.com/hashicorp/nomad/api/variables.go deleted file mode 100644 index 468776f1..00000000 --- a/vendor/github.com/hashicorp/nomad/api/variables.go +++ /dev/null @@ -1,472 +0,0 @@ -package api - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" -) - -const ( - ErrVariableNotFound = "variable not found" - ErrVariableMissingItems = "variable missing Items field" -) - -// Variables is used to access variables. -type Variables struct { - client *Client -} - -// Variables returns a new handle on the variables. -func (c *Client) Variables() *Variables { - return &Variables{client: c} -} - -// Create is used to create a variable. -func (sv *Variables) Create(v *Variable, qo *WriteOptions) (*Variable, *WriteMeta, error) { - - v.Path = cleanPathString(v.Path) - var out Variable - wm, err := sv.client.write("/v1/var/"+v.Path, v, &out, qo) - if err != nil { - return nil, wm, err - } - return &out, wm, nil -} - -// CheckedCreate is used to create a variable if it doesn't exist -// already. If it does, it will return a ErrCASConflict that can be unwrapped -// for more details. -func (sv *Variables) CheckedCreate(v *Variable, qo *WriteOptions) (*Variable, *WriteMeta, error) { - - v.Path = cleanPathString(v.Path) - var out Variable - wm, err := sv.writeChecked("/v1/var/"+v.Path+"?cas=0", v, &out, qo) - if err != nil { - return nil, wm, err - } - - return &out, wm, nil -} - -// Read is used to query a single variable by path. This will error -// if the variable is not found. 
-func (sv *Variables) Read(path string, qo *QueryOptions) (*Variable, *QueryMeta, error) { - - path = cleanPathString(path) - var svar = new(Variable) - qm, err := sv.readInternal("/v1/var/"+path, &svar, qo) - if err != nil { - return nil, nil, err - } - if svar == nil { - return nil, qm, errors.New(ErrVariableNotFound) - } - return svar, qm, nil -} - -// Peek is used to query a single variable by path, but does not error -// when the variable is not found -func (sv *Variables) Peek(path string, qo *QueryOptions) (*Variable, *QueryMeta, error) { - - path = cleanPathString(path) - var svar = new(Variable) - qm, err := sv.readInternal("/v1/var/"+path, &svar, qo) - if err != nil { - return nil, nil, err - } - return svar, qm, nil -} - -// Update is used to update a variable. -func (sv *Variables) Update(v *Variable, qo *WriteOptions) (*Variable, *WriteMeta, error) { - - v.Path = cleanPathString(v.Path) - var out Variable - - wm, err := sv.client.write("/v1/var/"+v.Path, v, &out, qo) - if err != nil { - return nil, wm, err - } - return &out, wm, nil -} - -// CheckedUpdate is used to updated a variable if the modify index -// matches the one on the server. If it does not, it will return an -// ErrCASConflict that can be unwrapped for more details. -func (sv *Variables) CheckedUpdate(v *Variable, qo *WriteOptions) (*Variable, *WriteMeta, error) { - - v.Path = cleanPathString(v.Path) - var out Variable - wm, err := sv.writeChecked("/v1/var/"+v.Path+"?cas="+fmt.Sprint(v.ModifyIndex), v, &out, qo) - if err != nil { - return nil, wm, err - } - - return &out, wm, nil -} - -// Delete is used to delete a variable -func (sv *Variables) Delete(path string, qo *WriteOptions) (*WriteMeta, error) { - - path = cleanPathString(path) - wm, err := sv.deleteInternal(path, qo) - if err != nil { - return nil, err - } - return wm, nil -} - -// CheckedDelete is used to conditionally delete a variable. If the -// existing variable does not match the provided checkIndex, it will return an -// ErrCASConflict that can be unwrapped for more details. -func (sv *Variables) CheckedDelete(path string, checkIndex uint64, qo *WriteOptions) (*WriteMeta, error) { - - path = cleanPathString(path) - wm, err := sv.deleteChecked(path, checkIndex, qo) - if err != nil { - return nil, err - } - - return wm, nil -} - -// List is used to dump all of the variables, can be used to pass prefix -// via QueryOptions rather than as a parameter -func (sv *Variables) List(qo *QueryOptions) ([]*VariableMetadata, *QueryMeta, error) { - - var resp []*VariableMetadata - qm, err := sv.client.query("/v1/vars", &resp, qo) - if err != nil { - return nil, nil, err - } - return resp, qm, nil -} - -// PrefixList is used to do a prefix List search over variables. -func (sv *Variables) PrefixList(prefix string, qo *QueryOptions) ([]*VariableMetadata, *QueryMeta, error) { - - if qo == nil { - qo = &QueryOptions{Prefix: prefix} - } else { - qo.Prefix = prefix - } - - return sv.List(qo) -} - -// GetItems returns the inner Items collection from a variable at a -// given path -func (sv *Variables) GetItems(path string, qo *QueryOptions) (*VariableItems, *QueryMeta, error) { - - path = cleanPathString(path) - svar := new(Variable) - - qm, err := sv.readInternal("/v1/var/"+path, &svar, qo) - if err != nil { - return nil, nil, err - } - - return &svar.Items, qm, nil -} - -// readInternal exists because the API's higher-level read method requires -// the status code to be 200 (OK). 
For Peek(), we do not consider 403 (Permission -// Denied or 404 (Not Found) an error, this function just returns a nil in those -// cases. -func (sv *Variables) readInternal(endpoint string, out **Variable, q *QueryOptions) (*QueryMeta, error) { - - r, err := sv.client.newRequest("GET", endpoint) - if err != nil { - return nil, err - } - r.setQueryOptions(q) - - checkFn := requireStatusIn(http.StatusOK, http.StatusNotFound, http.StatusForbidden) - rtt, resp, err := checkFn(sv.client.doRequest(r)) - if err != nil { - return nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == http.StatusNotFound { - *out = nil - resp.Body.Close() - return qm, nil - } - - if resp.StatusCode == http.StatusForbidden { - *out = nil - resp.Body.Close() - // On a 403, there is no QueryMeta to parse, but consul-template--the - // main consumer of the Peek() func that calls this method needs the - // value to be non-zero; so set them to a reasonable but artificial - // value. Index 1 doesn't say anything about the cluster, and there - // has to be a KnownLeader to get a 403. - qm.LastIndex = 1 - qm.KnownLeader = true - return qm, nil - } - - defer resp.Body.Close() - if err := decodeBody(resp, out); err != nil { - return nil, err - } - - return qm, nil -} - -// deleteInternal exists because the API's higher-level delete method requires -// the status code to be 200 (OK). The SV HTTP API returns a 204 (No Content) -// on success. -func (sv *Variables) deleteInternal(path string, q *WriteOptions) (*WriteMeta, error) { - - r, err := sv.client.newRequest("DELETE", fmt.Sprintf("/v1/var/%s", path)) - if err != nil { - return nil, err - } - r.setWriteOptions(q) - - checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent) - rtt, resp, err := checkFn(sv.client.doRequest(r)) - - if err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - parseWriteMeta(resp, wm) - return wm, nil -} - -// deleteChecked exists because the API's higher-level delete method requires -// the status code to be OK. The SV HTTP API returns a 204 (No Content) on -// success and a 409 (Conflict) on a CAS error. -func (sv *Variables) deleteChecked(path string, checkIndex uint64, q *WriteOptions) (*WriteMeta, error) { - - r, err := sv.client.newRequest("DELETE", fmt.Sprintf("/v1/var/%s?cas=%v", path, checkIndex)) - if err != nil { - return nil, err - } - r.setWriteOptions(q) - checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) - rtt, resp, err := checkFn(sv.client.doRequest(r)) - if err != nil { - return nil, err - } - - wm := &WriteMeta{RequestTime: rtt} - parseWriteMeta(resp, wm) - - // The only reason we should decode the response body is if - // it is a conflict response. Otherwise, there won't be one. - if resp.StatusCode == http.StatusConflict { - - conflict := new(Variable) - if err := decodeBody(resp, &conflict); err != nil { - return nil, err - } - return nil, ErrCASConflict{ - Conflict: conflict, - CheckIndex: checkIndex, - } - } - return wm, nil -} - -// writeChecked exists because the API's higher-level write method requires -// the status code to be OK. The SV HTTP API returns a 200 (OK) on -// success and a 409 (Conflict) on a CAS error. 
-func (sv *Variables) writeChecked(endpoint string, in *Variable, out *Variable, q *WriteOptions) (*WriteMeta, error) { - - r, err := sv.client.newRequest("PUT", endpoint) - if err != nil { - return nil, err - } - r.setWriteOptions(q) - r.obj = in - - checkFn := requireStatusIn(http.StatusOK, http.StatusNoContent, http.StatusConflict) - rtt, resp, err := checkFn(sv.client.doRequest(r)) - - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - parseWriteMeta(resp, wm) - - if resp.StatusCode == http.StatusConflict { - - conflict := new(Variable) - if err := decodeBody(resp, &conflict); err != nil { - return nil, err - } - return nil, ErrCASConflict{ - Conflict: conflict, - CheckIndex: in.ModifyIndex, - } - } - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } - return wm, nil -} - -// Variable specifies the metadata and contents to be stored in the -// encrypted Nomad backend. -type Variable struct { - // Namespace is the Nomad namespace associated with the variable - Namespace string `hcl:"namespace"` - // Path is the path to the variable - Path string `hcl:"path"` - - // Raft indexes to track creation and modification - CreateIndex uint64 `hcl:"create_index"` - ModifyIndex uint64 `hcl:"modify_index"` - - // Times provided as a convenience for operators expressed time.UnixNanos - CreateTime int64 `hcl:"create_time"` - ModifyTime int64 `hcl:"modify_time"` - - Items VariableItems `hcl:"items"` -} - -// VariableMetadata specifies the metadata for a variable and -// is used as the list object -type VariableMetadata struct { - // Namespace is the Nomad namespace associated with the variable - Namespace string `hcl:"namespace"` - // Path is the path to the variable - Path string `hcl:"path"` - - // Raft indexes to track creation and modification - CreateIndex uint64 `hcl:"create_index"` - ModifyIndex uint64 `hcl:"modify_index"` - - // Times provided as a convenience for operators expressed time.UnixNanos - CreateTime int64 `hcl:"create_time"` - ModifyTime int64 `hcl:"modify_time"` -} - -type VariableItems map[string]string - -// NewVariable is a convenience method to more easily create a -// ready-to-use variable -func NewVariable(path string) *Variable { - - return &Variable{ - Path: path, - Items: make(VariableItems), - } -} - -// Copy returns a new deep copy of this Variable -func (sv1 *Variable) Copy() *Variable { - - var out Variable = *sv1 - out.Items = make(VariableItems) - for k, v := range sv1.Items { - out.Items[k] = v - } - return &out -} - -// Metadata returns the VariableMetadata component of -// a Variable. This can be useful for comparing against -// a List result. 
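// Illustrative usage sketch (not part of the deleted file): round-tripping a
// Nomad variable with the helpers shown above. The path and item values are
// placeholders; the check-and-set flow relies on CheckedUpdate returning an
// ErrCASConflict when the stored ModifyIndex no longer matches.
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	vars := client.Variables()

	// Create a variable under a cleaned path with a single item.
	v := api.NewVariable("nomad/jobs/cache")
	v.Items["redis_password"] = "placeholder"
	created, _, err := vars.Create(v, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Update only if nobody else has written it since we read it back.
	created.Items["redis_password"] = "rotated"
	if _, _, err := vars.CheckedUpdate(created, nil); err != nil {
		var conflict api.ErrCASConflict
		if errors.As(err, &conflict) {
			fmt.Println("lost the CAS race; current index:", conflict.Conflict.ModifyIndex)
		} else {
			log.Fatal(err)
		}
	}

	// Peek does not error when the path is missing; Read does.
	if missing, _, err := vars.Peek("nomad/jobs/not-there", nil); err == nil && missing == nil {
		fmt.Println("no such variable")
	}

	if _, err := vars.Delete("nomad/jobs/cache", nil); err != nil {
		log.Fatal(err)
	}
}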
-func (sv *Variable) Metadata() *VariableMetadata { - - return &VariableMetadata{ - Namespace: sv.Namespace, - Path: sv.Path, - CreateIndex: sv.CreateIndex, - ModifyIndex: sv.ModifyIndex, - CreateTime: sv.CreateTime, - ModifyTime: sv.ModifyTime, - } -} - -// IsZeroValue can be used to test if a Variable has been changed -// from the default values it gets at creation -func (sv *Variable) IsZeroValue() bool { - return *sv.Metadata() == VariableMetadata{} && sv.Items == nil -} - -// cleanPathString removes leading and trailing slashes since they -// would trigger go's path cleaning/redirection behavior in the -// standard HTTP router -func cleanPathString(path string) string { - return strings.Trim(path, " /") -} - -// AsJSON returns the Variable as a JSON-formatted string -func (sv Variable) AsJSON() string { - var b []byte - b, _ = json.Marshal(sv) - return string(b) -} - -// AsPrettyJSON returns the Variable as a JSON-formatted string with -// indentation -func (sv Variable) AsPrettyJSON() string { - var b []byte - b, _ = json.MarshalIndent(sv, "", " ") - return string(b) -} - -type ErrCASConflict struct { - CheckIndex uint64 - Conflict *Variable -} - -func (e ErrCASConflict) Error() string { - return fmt.Sprintf("cas conflict: expected ModifyIndex %v; found %v", e.CheckIndex, e.Conflict.ModifyIndex) -} - -// doRequestWrapper is a function that wraps the client's doRequest method -// and can be used to provide error and response handling -type doRequestWrapper = func(time.Duration, *http.Response, error) (time.Duration, *http.Response, error) - -// requireStatusIn is a doRequestWrapper generator that takes expected HTTP -// response codes and validates that the received response code is among them -func requireStatusIn(statuses ...int) doRequestWrapper { - fn := func(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - statuses := statuses - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - - for _, status := range statuses { - if resp.StatusCode == status { - return d, resp, nil - } - } - - return d, nil, generateUnexpectedResponseCodeError(resp) - } - return fn -} - -// generateUnexpectedResponseCodeError creates a standardized error -// when the the API client's newRequest method receives an unexpected -// HTTP response code when accessing the variable's HTTP API -func generateUnexpectedResponseCodeError(resp *http.Response) error { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) -} diff --git a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/events.go b/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/events.go deleted file mode 100644 index dd2fc3d0..00000000 --- a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/events.go +++ /dev/null @@ -1,7 +0,0 @@ -package interfaces - -import "github.com/hashicorp/nomad/nomad/structs" - -type EventEmitter interface { - EmitEvent(event *structs.TaskEvent) -} diff --git a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/handle.go b/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/handle.go deleted file mode 100644 index 3fafaba1..00000000 --- a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/handle.go +++ /dev/null @@ -1,11 +0,0 @@ -package interfaces - -import ( - "time" -) - -// ScriptExecutor is an interface that 
supports Exec()ing commands in the -// driver's context. Split out of DriverHandle to ease testing. -type ScriptExecutor interface { - Exec(timeout time.Duration, cmd string, args []string) ([]byte, int, error) -} diff --git a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/lifecycle.go b/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/lifecycle.go deleted file mode 100644 index 325a13e3..00000000 --- a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces/lifecycle.go +++ /dev/null @@ -1,27 +0,0 @@ -package interfaces - -import ( - "context" - - "github.com/hashicorp/nomad/nomad/structs" -) - -type TaskLifecycle interface { - // Restart a task in place. If failure=false then the restart does not - // count as an attempt in the restart policy. - Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error - - // Sends a signal to a task. - Signal(event *structs.TaskEvent, signal string) error - - // Kill a task permanently. - Kill(ctx context.Context, event *structs.TaskEvent) error - - // IsRunning returns true if the task runner has a handle to the task - // driver, which is useful for distinguishing restored tasks during - // prestart hooks. But note that the driver handle could go away after you - // check this, so callers should make sure they're handling that case - // safely. Ideally prestart hooks should be idempotent whenever possible - // to handle restored tasks; use this as an escape hatch. - IsRunning() bool -} diff --git a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/state/state.go b/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/state/state.go deleted file mode 100644 index cda14400..00000000 --- a/vendor/github.com/hashicorp/nomad/client/allocrunner/taskrunner/state/state.go +++ /dev/null @@ -1,111 +0,0 @@ -package state - -import ( - "github.com/hashicorp/nomad/plugins/drivers" - "golang.org/x/exp/maps" -) - -// LocalState is Task state which is persisted for use when restarting Nomad -// agents. -type LocalState struct { - Hooks map[string]*HookState - - // DriverNetwork is the network information returned by the task - // driver's Start method - DriverNetwork *drivers.DriverNetwork - - // TaskHandle is the handle used to reattach to the task during recovery - TaskHandle *drivers.TaskHandle - - // RunComplete is set to true when the TaskRunner.Run() method finishes. - // It is used to distinguish between a dead task that could be restarted - // and one that will never run again. - RunComplete bool -} - -func NewLocalState() *LocalState { - return &LocalState{ - Hooks: make(map[string]*HookState), - } -} - -// Canonicalize ensures LocalState is in a consistent state by initializing -// Hooks and ensuring no HookState's are nil. Useful for cleaning unmarshalled -// state which may be in an unknown state. -func (s *LocalState) Canonicalize() { - if s.Hooks == nil { - // Hooks is nil, create it - s.Hooks = make(map[string]*HookState) - } else { - for k, v := range s.Hooks { - // Remove invalid nil entries from Hooks map - if v == nil { - delete(s.Hooks, k) - } - } - } -} - -// Copy LocalState. Returns nil if nil. 
-func (s *LocalState) Copy() *LocalState { - if s == nil { - return nil - } - - // Create a copy - c := &LocalState{ - Hooks: make(map[string]*HookState, len(s.Hooks)), - DriverNetwork: s.DriverNetwork.Copy(), - TaskHandle: s.TaskHandle.Copy(), - RunComplete: s.RunComplete, - } - - // Copy the hook state - for h, state := range s.Hooks { - c.Hooks[h] = state.Copy() - } - - return c -} - -type HookState struct { - // Prestart is true if the hook has run Prestart successfully and does - // not need to run again - PrestartDone bool - - // Data allows hooks to persist arbitrary state. - Data map[string]string - - // Environment variables set by the hook that will continue to be set - // even if PrestartDone=true. - Env map[string]string -} - -// Copy HookState. Returns nil if its nil. -func (h *HookState) Copy() *HookState { - if h == nil { - return nil - } - - c := new(HookState) - *c = *h - c.Data = maps.Clone(h.Data) - c.Env = maps.Clone(h.Env) - return c -} - -func (h *HookState) Equal(o *HookState) bool { - if h == nil || o == nil { - return h == o - } - - if h.PrestartDone != o.PrestartDone { - return false - } - - if !maps.Equal(h.Data, o.Data) { - return false - } - - return maps.Equal(h.Env, o.Env) -} diff --git a/vendor/github.com/hashicorp/nomad/client/config/artifact.go b/vendor/github.com/hashicorp/nomad/client/config/artifact.go deleted file mode 100644 index 3b1a6a94..00000000 --- a/vendor/github.com/hashicorp/nomad/client/config/artifact.go +++ /dev/null @@ -1,86 +0,0 @@ -package config - -import ( - "fmt" - "time" - - "github.com/dustin/go-humanize" - "github.com/hashicorp/nomad/nomad/structs/config" -) - -// ArtifactConfig is the internal readonly copy of the client agent's -// ArtifactConfig. -type ArtifactConfig struct { - HTTPReadTimeout time.Duration - HTTPMaxBytes int64 - - GCSTimeout time.Duration - GitTimeout time.Duration - HgTimeout time.Duration - S3Timeout time.Duration - - DecompressionLimitSize int64 - DecompressionLimitFileCount int -} - -// ArtifactConfigFromAgent creates a new internal readonly copy of the client -// agent's ArtifactConfig. The config should have already been validated. 
-func ArtifactConfigFromAgent(c *config.ArtifactConfig) (*ArtifactConfig, error) { - newConfig := &ArtifactConfig{} - - t, err := time.ParseDuration(*c.HTTPReadTimeout) - if err != nil { - return nil, fmt.Errorf("error parsing HTTPReadTimeout: %w", err) - } - newConfig.HTTPReadTimeout = t - - s, err := humanize.ParseBytes(*c.HTTPMaxSize) - if err != nil { - return nil, fmt.Errorf("error parsing HTTPMaxSize: %w", err) - } - newConfig.HTTPMaxBytes = int64(s) - - t, err = time.ParseDuration(*c.GCSTimeout) - if err != nil { - return nil, fmt.Errorf("error parsing GCSTimeout: %w", err) - } - newConfig.GCSTimeout = t - - t, err = time.ParseDuration(*c.GitTimeout) - if err != nil { - return nil, fmt.Errorf("error parsing GitTimeout: %w", err) - } - newConfig.GitTimeout = t - - t, err = time.ParseDuration(*c.HgTimeout) - if err != nil { - return nil, fmt.Errorf("error parsing HgTimeout: %w", err) - } - newConfig.HgTimeout = t - - t, err = time.ParseDuration(*c.S3Timeout) - if err != nil { - return nil, fmt.Errorf("error parsing S3Timeout: %w", err) - } - newConfig.S3Timeout = t - - s, err = humanize.ParseBytes(*c.DecompressionSizeLimit) - if err != nil { - return nil, fmt.Errorf("error parsing DecompressionLimitSize: %w", err) - } - newConfig.DecompressionLimitSize = int64(s) - - // no parsing its just an int - newConfig.DecompressionLimitFileCount = *c.DecompressionFileCountLimit - - return newConfig, nil -} - -func (a *ArtifactConfig) Copy() *ArtifactConfig { - if a == nil { - return nil - } - - newCopy := *a - return &newCopy -} diff --git a/vendor/github.com/hashicorp/nomad/client/config/config.go b/vendor/github.com/hashicorp/nomad/client/config/config.go deleted file mode 100644 index 8ff6e986..00000000 --- a/vendor/github.com/hashicorp/nomad/client/config/config.go +++ /dev/null @@ -1,906 +0,0 @@ -package config - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" - "strconv" - "strings" - "time" - - "github.com/hashicorp/consul-template/config" - "github.com/hashicorp/nomad/client/lib/cgutil" - "github.com/hashicorp/nomad/command/agent/host" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/client/state" - "github.com/hashicorp/nomad/helper/bufconndialer" - "github.com/hashicorp/nomad/helper/pluginutils/loader" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/hashicorp/nomad/nomad/structs" - structsc "github.com/hashicorp/nomad/nomad/structs/config" - "github.com/hashicorp/nomad/plugins/base" - "github.com/hashicorp/nomad/version" -) - -var ( - // DefaultEnvDenylist is the default set of environment variables that are - // filtered when passing the environment variables of the host to a task. - DefaultEnvDenylist = strings.Join(host.DefaultEnvDenyList, ",") - - // DefaultUserDenylist is the default set of users that tasks are not - // allowed to run as when using a driver in "user.checked_drivers" - DefaultUserDenylist = strings.Join([]string{ - "root", - "Administrator", - }, ",") - - // DefaultUserCheckedDrivers is the set of drivers we apply the user - // denylist onto. For virtualized drivers it often doesn't make sense to - // make this stipulation so by default they are ignored. - DefaultUserCheckedDrivers = strings.Join([]string{ - "exec", - "qemu", - "java", - }, ",") - - // A mapping of directories on the host OS to attempt to embed inside each - // task's chroot. 
- DefaultChrootEnv = map[string]string{ - "/bin": "/bin", - "/etc": "/etc", - "/lib": "/lib", - "/lib32": "/lib32", - "/lib64": "/lib64", - "/run/resolvconf": "/run/resolvconf", - "/sbin": "/sbin", - "/usr": "/usr", - - // embed systemd-resolved paths for systemd-resolved paths: - // /etc/resolv.conf is a symlink to /run/systemd/resolve/stub-resolv.conf in such systems. - // In non-systemd systems, this mount is a no-op and the path is ignored if not present. - "/run/systemd/resolve": "/run/systemd/resolve", - } - - DefaultTemplateMaxStale = 87600 * time.Hour - - DefaultTemplateFunctionDenylist = []string{"plugin", "writeToFile"} -) - -// RPCHandler can be provided to the Client if there is a local server -// to avoid going over the network. If not provided, the Client will -// maintain a connection pool to the servers -type RPCHandler interface { - RPC(method string, args interface{}, reply interface{}) error -} - -// Config is used to parameterize and configure the behavior of the client -type Config struct { - // DevMode controls if we are in a development mode which - // avoids persistent storage. - DevMode bool - - // EnableDebug is used to enable debugging RPC endpoints - // in the absence of ACLs - EnableDebug bool - - // StateDir is where we store our state - StateDir string - - // AllocDir is where we store data for allocations - AllocDir string - - // LogOutput is the destination for logs - LogOutput io.Writer - - // Logger provides a logger to the client - Logger log.InterceptLogger - - // Region is the clients region - Region string - - // Network interface to be used in network fingerprinting - NetworkInterface string - - // Network speed is the default speed of network interfaces if they can not - // be determined dynamically. - NetworkSpeed int - - // CpuCompute is the default total CPU compute if they can not be determined - // dynamically. It should be given as Cores * MHz (2 Cores * 2 Ghz = 4000) - CpuCompute int - - // MemoryMB is the default node total memory in megabytes if it cannot be - // determined dynamically. - MemoryMB int - - // MaxKillTimeout allows capping the user-specifiable KillTimeout. If the - // task's KillTimeout is greater than the MaxKillTimeout, MaxKillTimeout is - // used. - MaxKillTimeout time.Duration - - // Servers is a list of known server addresses. These are as "host:port" - Servers []string - - // RPCHandler can be provided to avoid network traffic if the - // server is running locally. - RPCHandler RPCHandler - - // Node provides the base node - Node *structs.Node - - // ClientMaxPort is the upper range of the ports that the client uses for - // communicating with plugin subsystems over loopback - ClientMaxPort uint - - // ClientMinPort is the lower range of the ports that the client uses for - // communicating with plugin subsystems over loopback - ClientMinPort uint - - // MaxDynamicPort is the largest dynamic port generated - MaxDynamicPort int - - // MinDynamicPort is the smallest dynamic port generated - MinDynamicPort int - - // A mapping of directories on the host OS to attempt to embed inside each - // task's chroot. - ChrootEnv map[string]string - - // Options provides arbitrary key-value configuration for nomad internals, - // like fingerprinters and drivers. 
The format is: - // - // namespace.option = value - Options map[string]string - - // Version is the version of the Nomad client - Version *version.VersionInfo - - // ConsulConfig is this Agent's Consul configuration - ConsulConfig *structsc.ConsulConfig - - // VaultConfig is this Agent's Vault configuration - VaultConfig *structsc.VaultConfig - - // StatsCollectionInterval is the interval at which the Nomad client - // collects resource usage stats - StatsCollectionInterval time.Duration - - // PublishNodeMetrics determines whether nomad is going to publish node - // level metrics to remote Telemetry sinks - PublishNodeMetrics bool - - // PublishAllocationMetrics determines whether nomad is going to publish - // allocation metrics to remote Telemetry sinks - PublishAllocationMetrics bool - - // TLSConfig holds various TLS related configurations - TLSConfig *structsc.TLSConfig - - // GCInterval is the time interval at which the client triggers garbage - // collection - GCInterval time.Duration - - // GCParallelDestroys is the number of parallel destroys the garbage - // collector will allow. - GCParallelDestroys int - - // GCDiskUsageThreshold is the disk usage threshold given as a percent - // beyond which the Nomad client triggers GC of terminal allocations - GCDiskUsageThreshold float64 - - // GCInodeUsageThreshold is the inode usage threshold given as a percent - // beyond which the Nomad client triggers GC of the terminal allocations - GCInodeUsageThreshold float64 - - // GCMaxAllocs is the maximum number of allocations a node can have - // before garbage collection is triggered. - GCMaxAllocs int - - // LogLevel is the level of the logs to putout - LogLevel string - - // NoHostUUID disables using the host's UUID and will force generation of a - // random UUID. - NoHostUUID bool - - // ACLEnabled controls if ACL enforcement and management is enabled. - ACLEnabled bool - - // ACLTokenTTL is how long we cache token values for - ACLTokenTTL time.Duration - - // ACLPolicyTTL is how long we cache policy values for - ACLPolicyTTL time.Duration - - // ACLRoleTTL is how long we cache ACL role value for within each Nomad - // client. - ACLRoleTTL time.Duration - - // DisableRemoteExec disables remote exec targeting tasks on this client - DisableRemoteExec bool - - // TemplateConfig includes configuration for template rendering - TemplateConfig *ClientTemplateConfig - - // RPCHoldTimeout is how long an RPC can be "held" before it is errored. - // This is used to paper over a loss of leadership by instead holding RPCs, - // so that the caller experiences a slow response rather than an error. - // This period is meant to be long enough for a leader election to take - // place, and a small jitter is applied to avoid a thundering herd. - RPCHoldTimeout time.Duration - - // PluginLoader is used to load plugins. - PluginLoader loader.PluginCatalog - - // PluginSingletonLoader is a plugin loader that will returns singleton - // instances of the plugins. - PluginSingletonLoader loader.PluginCatalog - - // StateDBFactory is used to override stateDB implementations, - StateDBFactory state.NewStateDBFunc - - // CNIPath is the path used to search for CNI plugins. Multiple paths can - // be specified with colon delimited - CNIPath string - - // CNIConfigDir is the directory where CNI network configuration is located. The - // client will use this path when fingerprinting CNI networks. - CNIConfigDir string - - // CNIInterfacePrefix is the prefix to use when creating CNI network interfaces. 
This - // defaults to 'eth', therefore the first interface created by CNI inside the alloc - // network will be 'eth0'. - CNIInterfacePrefix string - - // BridgeNetworkName is the name to use for the bridge created in bridge - // networking mode. This defaults to 'nomad' if not set - BridgeNetworkName string - - // BridgeNetworkAllocSubnet is the IP subnet to use for address allocation - // for allocations in bridge networking mode. Subnet must be in CIDR - // notation - BridgeNetworkAllocSubnet string - - // HostVolumes is a map of the configured host volumes by name. - HostVolumes map[string]*structs.ClientHostVolumeConfig - - // HostNetworks is a map of the conigured host networks by name. - HostNetworks map[string]*structs.ClientHostNetworkConfig - - // BindWildcardDefaultHostNetwork toggles if the default host network should accept all - // destinations (true) or only filter on the IP of the default host network (false) when - // port mapping. This allows Nomad clients with no defined host networks to accept and - // port forward traffic only matching on the destination port. An example use of this - // is when a network loadbalancer is utilizing direct server return and the destination - // address of incomming packets does not match the IP address of the host interface. - // - // This configuration is only considered if no host networks are defined. - BindWildcardDefaultHostNetwork bool - - // CgroupParent is the parent cgroup Nomad should use when managing any cgroup subsystems. - // Currently this only includes the 'cpuset' cgroup subsystem. - CgroupParent string - - // ReservableCores if set overrides the set of reservable cores reported in fingerprinting. - ReservableCores []uint16 - - // NomadServiceDiscovery determines whether the Nomad native service - // discovery client functionality is enabled. - NomadServiceDiscovery bool - - // TemplateDialer is our custom HTTP dialer for consul-template. This is - // used for template functions which require access to the Nomad API. - TemplateDialer *bufconndialer.BufConnWrapper - - // Artifact configuration from the agent's config file. - Artifact *ArtifactConfig -} - -// ClientTemplateConfig is configuration on the client specific to template -// rendering -type ClientTemplateConfig struct { - // FunctionDenylist disables functions in consul-template that - // are unsafe because they expose information from the client host. - FunctionDenylist []string `hcl:"function_denylist"` - - // Deprecated: COMPAT(1.0) consul-template uses inclusive language from - // v0.25.0 - function_blacklist is kept for compatibility - FunctionBlacklist []string `hcl:"function_blacklist"` - - // DisableSandbox allows templates to access arbitrary files on the - // client host. By default templates can access files only within - // the task directory. - DisableSandbox bool `hcl:"disable_file_sandbox"` - - // This is the maximum interval to allow "stale" data. By default, only the - // Consul leader will respond to queries; any requests to a follower will - // forward to the leader. In large clusters with many requests, this is not as - // scalable, so this option allows any follower to respond to a query, so long - // as the last-replicated data is within these bounds. Higher values result in - // less cluster load, but are more likely to have outdated data. - // NOTE: Since Consul Template uses a pointer, this field uses a pointer which - // is inconsistent with how Nomad typically works. 
This decision was made to - // maintain parity with the external subsystem, not to establish a new standard. - MaxStale *time.Duration `hcl:"-"` - MaxStaleHCL string `hcl:"max_stale,optional"` - - // BlockQueryWaitTime is amount of time in seconds to do a blocking query for. - // Many endpoints in Consul support a feature known as "blocking queries". - // A blocking query is used to wait for a potential change using long polling. - // NOTE: Since Consul Template uses a pointer, this field uses a pointer which - // is inconsistent with how Nomad typically works. This decision was made to - // maintain parity with the external subsystem, not to establish a new standard. - BlockQueryWaitTime *time.Duration `hcl:"-"` - BlockQueryWaitTimeHCL string `hcl:"block_query_wait,optional"` - - // Wait is the quiescence timers; it defines the minimum and maximum amount of - // time to wait for the Consul cluster to reach a consistent state before rendering a - // template. This is useful to enable in systems where Consul is experiencing - // a lot of flapping because it will reduce the number of times a template is rendered. - Wait *WaitConfig `hcl:"wait,optional" json:"-"` - - // WaitBounds allows operators to define boundaries on individual template wait - // configuration overrides. If set, this ensures that if a job author specifies - // a wait configuration with values the cluster operator does not allow, the - // cluster operator's boundary will be applied rather than the job author's - // out of bounds configuration. - WaitBounds *WaitConfig `hcl:"wait_bounds,optional" json:"-"` - - // This controls the retry behavior when an error is returned from Consul. - // Consul Template is highly fault tolerant, meaning it does not exit in the - // face of failure. Instead, it uses exponential back-off and retry functions - // to wait for the cluster to become available, as is customary in distributed - // systems. - ConsulRetry *RetryConfig `hcl:"consul_retry,optional"` - - // This controls the retry behavior when an error is returned from Vault. - // Consul Template is highly fault tolerant, meaning it does not exit in the - // face of failure. Instead, it uses exponential back-off and retry functions - // to wait for the cluster to become available, as is customary in distributed - // systems. - VaultRetry *RetryConfig `hcl:"vault_retry,optional"` - - // This controls the retry behavior when an error is returned from Nomad. - // Consul Template is highly fault tolerant, meaning it does not exit in the - // face of failure. Instead, it uses exponential back-off and retry functions - // to wait for the cluster to become available, as is customary in distributed - // systems. 
- NomadRetry *RetryConfig `hcl:"nomad_retry,optional"` -} - -// Copy returns a deep copy of a ClientTemplateConfig -func (c *ClientTemplateConfig) Copy() *ClientTemplateConfig { - if c == nil { - return nil - } - - nc := new(ClientTemplateConfig) - *nc = *c - - if len(c.FunctionDenylist) > 0 { - nc.FunctionDenylist = slices.Clone(nc.FunctionDenylist) - } else if c.FunctionDenylist != nil { - // Explicitly no functions denied (which is different than nil) - nc.FunctionDenylist = []string{} - } - - if c.BlockQueryWaitTime != nil { - nc.BlockQueryWaitTime = &*c.BlockQueryWaitTime - } - - if c.MaxStale != nil { - nc.MaxStale = &*c.MaxStale - } - - if c.Wait != nil { - nc.Wait = c.Wait.Copy() - } - - if c.ConsulRetry != nil { - nc.ConsulRetry = c.ConsulRetry.Copy() - } - - if c.VaultRetry != nil { - nc.VaultRetry = c.VaultRetry.Copy() - } - - if c.NomadRetry != nil { - nc.NomadRetry = c.NomadRetry.Copy() - } - - return nc -} - -func (c *ClientTemplateConfig) IsEmpty() bool { - if c == nil { - return true - } - - return !c.DisableSandbox && - c.FunctionDenylist == nil && - c.FunctionBlacklist == nil && - c.BlockQueryWaitTime == nil && - c.BlockQueryWaitTimeHCL == "" && - c.MaxStale == nil && - c.MaxStaleHCL == "" && - c.Wait.IsEmpty() && - c.ConsulRetry.IsEmpty() && - c.VaultRetry.IsEmpty() && - c.NomadRetry.IsEmpty() -} - -// WaitConfig is mirrored from templateconfig.WaitConfig because we need to handle -// the HCL conversion which happens in agent.ParseConfigFile -// NOTE: Since Consul Template requires pointers, this type uses pointers to fields -// which is inconsistent with how Nomad typically works. This decision was made -// to maintain parity with the external subsystem, not to establish a new standard. -type WaitConfig struct { - Min *time.Duration `hcl:"-"` - MinHCL string `hcl:"min,optional" json:"-"` - Max *time.Duration `hcl:"-"` - MaxHCL string `hcl:"max,optional" json:"-"` -} - -// Copy returns a deep copy of the receiver. -func (wc *WaitConfig) Copy() *WaitConfig { - if wc == nil { - return nil - } - - nwc := new(WaitConfig) - - if wc.Min != nil { - nwc.Min = &*wc.Min - } - - if wc.Max != nil { - nwc.Max = &*wc.Max - } - - return wc -} - -// Equals returns the result of reflect.DeepEqual -func (wc *WaitConfig) Equals(other *WaitConfig) bool { - return reflect.DeepEqual(wc, other) -} - -// IsEmpty returns true if the receiver only contains an instance with no fields set. -func (wc *WaitConfig) IsEmpty() bool { - if wc == nil { - return true - } - return wc.Equals(&WaitConfig{}) -} - -// Validate returns an error if the receiver is nil or empty or if Min is greater -// than Max the user specified Max. -func (wc *WaitConfig) Validate() error { - // If the config is nil or empty return false so that it is never assigned. - if wc == nil || wc.IsEmpty() { - return errors.New("wait config is nil or empty") - } - - // If min is nil, return - if wc.Min == nil { - return nil - } - - // If min isn't nil, make sure Max is less than Min. - if wc.Max != nil { - if *wc.Min > *wc.Max { - return fmt.Errorf("wait config min %d is greater than max %d", *wc.Min, *wc.Max) - } - } - - // Otherwise, return nil. Consul Template will set a Max based off of Min. - return nil -} - -// Merge merges two WaitConfigs. The passed instance always takes precedence. 
-func (wc *WaitConfig) Merge(b *WaitConfig) *WaitConfig { - if wc == nil { - return b - } - - result := *wc - if b == nil { - return &result - } - - if b.Min != nil { - result.Min = &*b.Min - } - - if b.MinHCL != "" { - result.MinHCL = b.MinHCL - } - - if b.Max != nil { - result.Max = &*b.Max - } - - if b.MaxHCL != "" { - result.MaxHCL = b.MaxHCL - } - - return &result -} - -// ToConsulTemplate converts a client WaitConfig instance to a consul-template WaitConfig -func (wc *WaitConfig) ToConsulTemplate() (*config.WaitConfig, error) { - if wc.IsEmpty() { - return nil, errors.New("wait config is empty") - } - - if err := wc.Validate(); err != nil { - return nil, err - } - - result := &config.WaitConfig{Enabled: pointer.Of(true)} - - if wc.Min != nil { - result.Min = wc.Min - } - - if wc.Max != nil { - result.Max = wc.Max - } - - return result, nil -} - -// RetryConfig is mirrored from templateconfig.WaitConfig because we need to handle -// the HCL indirection to support mapping in agent.ParseConfigFile. -// NOTE: Since Consul Template requires pointers, this type uses pointers to fields -// which is inconsistent with how Nomad typically works. However, since zero in -// Attempts and MaxBackoff have special meaning, it is necessary to know if the -// value was actually set rather than if it defaulted to 0. The rest of the fields -// use pointers to maintain parity with the external subystem, not to establish -// a new standard. -type RetryConfig struct { - // Attempts is the total number of maximum attempts to retry before letting - // the error fall through. - // 0 means unlimited. - Attempts *int `hcl:"attempts,optional"` - // Backoff is the base of the exponential backoff. This number will be - // multiplied by the next power of 2 on each iteration. - Backoff *time.Duration `hcl:"-"` - BackoffHCL string `hcl:"backoff,optional" json:"-"` - // MaxBackoff is an upper limit to the sleep time between retries - // A MaxBackoff of 0 means there is no limit to the exponential growth of the backoff. - MaxBackoff *time.Duration `hcl:"-"` - MaxBackoffHCL string `hcl:"max_backoff,optional" json:"-"` -} - -func (rc *RetryConfig) Copy() *RetryConfig { - if rc == nil { - return nil - } - - nrc := new(RetryConfig) - *nrc = *rc - - // Now copy pointer values - if rc.Attempts != nil { - nrc.Attempts = &*rc.Attempts - } - if rc.Backoff != nil { - nrc.Backoff = &*rc.Backoff - } - if rc.MaxBackoff != nil { - nrc.MaxBackoff = &*rc.MaxBackoff - } - - return nrc -} - -// Equals returns the result of reflect.DeepEqual -func (rc *RetryConfig) Equals(other *RetryConfig) bool { - return reflect.DeepEqual(rc, other) -} - -// IsEmpty returns true if the receiver only contains an instance with no fields set. -func (rc *RetryConfig) IsEmpty() bool { - if rc == nil { - return true - } - - return rc.Equals(&RetryConfig{}) -} - -// Validate returns an error if the receiver is nil or empty, or if Backoff -// is greater than MaxBackoff. -func (rc *RetryConfig) Validate() error { - // If the config is nil or empty return false so that it is never assigned. - if rc == nil || rc.IsEmpty() { - return errors.New("retry config is nil or empty") - } - - // If Backoff not set, no need to validate - if rc.Backoff == nil { - return nil - } - - // MaxBackoff nil will end up defaulted to 1 minutes. We should validate that - // the user supplied backoff does not exceed that. 
- if rc.MaxBackoff == nil && *rc.Backoff > config.DefaultRetryMaxBackoff { - return fmt.Errorf("retry config backoff %d is greater than default max_backoff %d", *rc.Backoff, config.DefaultRetryMaxBackoff) - } - - // MaxBackoff == 0 means backoff is unbounded. No need to validate. - if rc.MaxBackoff != nil && *rc.MaxBackoff == 0 { - return nil - } - - if rc.MaxBackoff != nil && *rc.Backoff > *rc.MaxBackoff { - return fmt.Errorf("retry config backoff %d is greater than max_backoff %d", *rc.Backoff, *rc.MaxBackoff) - } - - return nil -} - -// Merge merges two RetryConfigs. The passed instance always takes precedence. -func (rc *RetryConfig) Merge(b *RetryConfig) *RetryConfig { - if rc == nil { - return b - } - - result := *rc - if b == nil { - return &result - } - - if b.Attempts != nil { - result.Attempts = &*b.Attempts - } - - if b.Backoff != nil { - result.Backoff = &*b.Backoff - } - - if b.BackoffHCL != "" { - result.BackoffHCL = b.BackoffHCL - } - - if b.MaxBackoff != nil { - result.MaxBackoff = &*b.MaxBackoff - } - - if b.MaxBackoffHCL != "" { - result.MaxBackoffHCL = b.MaxBackoffHCL - } - - return &result -} - -// ToConsulTemplate converts a client RetryConfig instance to a consul-template RetryConfig -func (rc *RetryConfig) ToConsulTemplate() (*config.RetryConfig, error) { - if err := rc.Validate(); err != nil { - return nil, err - } - - result := &config.RetryConfig{Enabled: pointer.Of(true)} - - if rc.Attempts != nil { - result.Attempts = rc.Attempts - } - - if rc.Backoff != nil { - result.Backoff = rc.Backoff - } - - if rc.MaxBackoff != nil { - result.MaxBackoff = &*rc.MaxBackoff - } - - return result, nil -} - -func (c *Config) Copy() *Config { - if c == nil { - return nil - } - - nc := *c - nc.Node = nc.Node.Copy() - nc.Servers = slices.Clone(nc.Servers) - nc.Options = maps.Clone(nc.Options) - nc.HostVolumes = structs.CopyMapStringClientHostVolumeConfig(nc.HostVolumes) - nc.ConsulConfig = c.ConsulConfig.Copy() - nc.VaultConfig = c.VaultConfig.Copy() - nc.TemplateConfig = c.TemplateConfig.Copy() - nc.ReservableCores = slices.Clone(c.ReservableCores) - nc.Artifact = c.Artifact.Copy() - return &nc -} - -// DefaultConfig returns the default configuration -func DefaultConfig() *Config { - return &Config{ - Version: version.GetVersion(), - VaultConfig: structsc.DefaultVaultConfig(), - ConsulConfig: structsc.DefaultConsulConfig(), - LogOutput: os.Stderr, - Region: "global", - StatsCollectionInterval: 1 * time.Second, - TLSConfig: &structsc.TLSConfig{}, - LogLevel: "DEBUG", - GCInterval: 1 * time.Minute, - GCParallelDestroys: 2, - GCDiskUsageThreshold: 80, - GCInodeUsageThreshold: 70, - GCMaxAllocs: 50, - NoHostUUID: true, - DisableRemoteExec: false, - TemplateConfig: &ClientTemplateConfig{ - FunctionDenylist: DefaultTemplateFunctionDenylist, - DisableSandbox: false, - BlockQueryWaitTime: pointer.Of(5 * time.Minute), // match Consul default - MaxStale: pointer.Of(DefaultTemplateMaxStale), // match Consul default - Wait: &WaitConfig{ - Min: pointer.Of(5 * time.Second), - Max: pointer.Of(4 * time.Minute), - }, - ConsulRetry: &RetryConfig{ - Attempts: pointer.Of(0), // unlimited - }, - VaultRetry: &RetryConfig{ - Attempts: pointer.Of(0), // unlimited - }, - NomadRetry: &RetryConfig{ - Attempts: pointer.Of(0), // unlimited - }, - }, - RPCHoldTimeout: 5 * time.Second, - CNIPath: "/opt/cni/bin", - CNIConfigDir: "/opt/cni/config", - CNIInterfacePrefix: "eth", - HostNetworks: map[string]*structs.ClientHostNetworkConfig{}, - CgroupParent: cgutil.GetCgroupParent(""), - MaxDynamicPort: 
structs.DefaultMinDynamicPort, - MinDynamicPort: structs.DefaultMaxDynamicPort, - } -} - -// Read returns the specified configuration value or "". -func (c *Config) Read(id string) string { - return c.Options[id] -} - -// ReadDefault returns the specified configuration value, or the specified -// default value if none is set. -func (c *Config) ReadDefault(id string, defaultValue string) string { - return c.ReadAlternativeDefault([]string{id}, defaultValue) -} - -// ReadAlternativeDefault returns the specified configuration value, or the -// specified value if none is set. -func (c *Config) ReadAlternativeDefault(ids []string, defaultValue string) string { - for _, id := range ids { - val, ok := c.Options[id] - if ok { - return val - } - } - - return defaultValue -} - -// ReadBool parses the specified option as a boolean. -func (c *Config) ReadBool(id string) (bool, error) { - val, ok := c.Options[id] - if !ok { - return false, fmt.Errorf("Specified config is missing from options") - } - bval, err := strconv.ParseBool(val) - if err != nil { - return false, fmt.Errorf("Failed to parse %s as bool: %s", val, err) - } - return bval, nil -} - -// ReadBoolDefault tries to parse the specified option as a boolean. If there is -// an error in parsing, the default option is returned. -func (c *Config) ReadBoolDefault(id string, defaultValue bool) bool { - val, err := c.ReadBool(id) - if err != nil { - return defaultValue - } - return val -} - -// ReadInt parses the specified option as a int. -func (c *Config) ReadInt(id string) (int, error) { - val, ok := c.Options[id] - if !ok { - return 0, fmt.Errorf("Specified config is missing from options") - } - ival, err := strconv.Atoi(val) - if err != nil { - return 0, fmt.Errorf("Failed to parse %s as int: %s", val, err) - } - return ival, nil -} - -// ReadIntDefault tries to parse the specified option as a int. If there is -// an error in parsing, the default option is returned. -func (c *Config) ReadIntDefault(id string, defaultValue int) int { - val, err := c.ReadInt(id) - if err != nil { - return defaultValue - } - return val -} - -// ReadDuration parses the specified option as a duration. -func (c *Config) ReadDuration(id string) (time.Duration, error) { - val, ok := c.Options[id] - if !ok { - return time.Duration(0), fmt.Errorf("Specified config is missing from options") - } - dval, err := time.ParseDuration(val) - if err != nil { - return time.Duration(0), fmt.Errorf("Failed to parse %s as time duration: %s", val, err) - } - return dval, nil -} - -// ReadDurationDefault tries to parse the specified option as a duration. If there is -// an error in parsing, the default option is returned. -func (c *Config) ReadDurationDefault(id string, defaultValue time.Duration) time.Duration { - val, err := c.ReadDuration(id) - if err != nil { - return defaultValue - } - return val -} - -// ReadStringListToMap tries to parse the specified option(s) as a comma separated list. -// If there is an error in parsing, an empty list is returned. -func (c *Config) ReadStringListToMap(keys ...string) map[string]struct{} { - val := c.ReadAlternativeDefault(keys, "") - - return splitValue(val) -} - -// ReadStringListToMapDefault tries to parse the specified option as a comma -// separated list. If there is an error in parsing, an empty list is returned. 
-func (c *Config) ReadStringListToMapDefault(key, defaultValue string) map[string]struct{} { - return c.ReadStringListAlternativeToMapDefault([]string{key}, defaultValue) -} - -// ReadStringListAlternativeToMapDefault tries to parse the specified options as a comma sparated list. -// If there is an error in parsing, an empty list is returned. -func (c *Config) ReadStringListAlternativeToMapDefault(keys []string, defaultValue string) map[string]struct{} { - val := c.ReadAlternativeDefault(keys, defaultValue) - - return splitValue(val) -} - -// splitValue parses the value as a comma separated list. -func splitValue(val string) map[string]struct{} { - list := make(map[string]struct{}) - if val != "" { - for _, e := range strings.Split(val, ",") { - trimmed := strings.TrimSpace(e) - list[trimmed] = struct{}{} - } - } - return list -} - -// NomadPluginConfig produces the NomadConfig struct which is sent to Nomad plugins -func (c *Config) NomadPluginConfig() *base.AgentConfig { - return &base.AgentConfig{ - Driver: &base.ClientDriverConfig{ - ClientMinPort: c.ClientMinPort, - ClientMaxPort: c.ClientMaxPort, - }, - } -} diff --git a/vendor/github.com/hashicorp/nomad/client/config/testing.go b/vendor/github.com/hashicorp/nomad/client/config/testing.go deleted file mode 100644 index f06fbe74..00000000 --- a/vendor/github.com/hashicorp/nomad/client/config/testing.go +++ /dev/null @@ -1,72 +0,0 @@ -package config - -import ( - "os" - "path/filepath" - "time" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/nomad/mock" - testing "github.com/mitchellh/go-testing-interface" -) - -// TestClientConfig returns a default client configuration for test clients and -// a cleanup func to remove the state and alloc dirs when finished. -func TestClientConfig(t testing.T) (*Config, func()) { - conf := DefaultConfig() - conf.Node = mock.Node() - conf.Logger = testlog.HCLogger(t) - - // On macOS, os.TempDir returns a symlinked path under /var which - // is outside of the directories shared into the VM used for Docker. - // Expand the symlink to get the real path in /private, which is ok. 
- dirName := os.TempDir() - tmpDir, err := filepath.EvalSymlinks(dirName) - if err != nil { - t.Fatalf("Could not resolve temporary directory links for %s: %v", tmpDir, err) - } - tmpDir = filepath.Clean(tmpDir) - - // Create a tempdir to hold state and alloc subdirs - parent, err := os.MkdirTemp(tmpDir, "nomadtest") - if err != nil { - t.Fatalf("error creating client dir: %v", err) - } - cleanup := func() { - os.RemoveAll(parent) - } - - allocDir := filepath.Join(parent, "allocs") - if err := os.Mkdir(allocDir, 0777); err != nil { - cleanup() - t.Fatalf("error creating alloc dir: %v", err) - } - conf.AllocDir = allocDir - - stateDir := filepath.Join(parent, "client") - if err := os.Mkdir(stateDir, 0777); err != nil { - cleanup() - t.Fatalf("error creating alloc dir: %v", err) - } - conf.StateDir = stateDir - - // Use a minimal chroot environment - conf.ChrootEnv = ci.TinyChroot - - // Helps make sure we are respecting configured parent - conf.CgroupParent = "testing.slice" - - conf.VaultConfig.Enabled = pointer.Of(false) - conf.DevMode = true - - // Loosen GC threshold - conf.GCDiskUsageThreshold = 98.0 - conf.GCInodeUsageThreshold = 98.0 - - // Same as default; necessary for task Event messages - conf.MaxKillTimeout = 30 * time.Second - - return conf, cleanup -} diff --git a/vendor/github.com/hashicorp/nomad/client/devicemanager/state/state.go b/vendor/github.com/hashicorp/nomad/client/devicemanager/state/state.go deleted file mode 100644 index 20eb789f..00000000 --- a/vendor/github.com/hashicorp/nomad/client/devicemanager/state/state.go +++ /dev/null @@ -1,11 +0,0 @@ -package state - -import pstructs "github.com/hashicorp/nomad/plugins/shared/structs" - -// PluginState is used to store the device manager's state across restarts of the -// agent -type PluginState struct { - // ReattachConfigs are the set of reattach configs for plugins launched by - // the device manager - ReattachConfigs map[string]*pstructs.ReattachConfig -} diff --git a/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go b/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go index 4515c64f..49745973 100644 --- a/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go +++ b/vendor/github.com/hashicorp/nomad/client/dynamicplugins/registry.go @@ -9,6 +9,9 @@ import ( "errors" "fmt" "sync" + "time" + + "github.com/hashicorp/nomad/helper" ) const ( @@ -22,6 +25,7 @@ type Registry interface { RegisterPlugin(info *PluginInfo) error DeregisterPlugin(ptype, name, allocID string) error + WaitForPlugin(ctx context.Context, ptype, pname string) (*PluginInfo, error) ListPlugins(ptype string) []*PluginInfo DispensePlugin(ptype, name string) (interface{}, error) PluginForAlloc(ptype, name, allocID string) (*PluginInfo, error) @@ -301,6 +305,62 @@ func (d *dynamicRegistry) ListPlugins(ptype string) []*PluginInfo { return plugins } +// WaitForPlugin repeatedly checks until a plugin with a given type and name +// becomes available or its context is canceled or times out. +// Callers should pass in a context with a sensible timeout +// for the plugin they're expecting to find. 
+func (d *dynamicRegistry) WaitForPlugin(ctx context.Context, ptype, name string) (*PluginInfo, error) { + // this is our actual goal, which may be run repeatedly + findPlugin := func() *PluginInfo { + for _, p := range d.ListPlugins(ptype) { + if p.Name == name { + return p + } + } + return nil + } + + // try immediately first, before any timers get involved + if p := findPlugin(); p != nil { + return p, nil + } + + // next, loop until found or context is done + + // these numbers are almost arbitrary... + delay := 200 // milliseconds between checks, will backoff + maxDelay := 5000 // up to 5 seconds between each check + + // put a long upper bound on total time, + // just in case callers don't follow directions. + ctx, cancel := context.WithTimeout(ctx, 24*time.Hour) + defer cancel() + + timer, stop := helper.NewSafeTimer(time.Duration(delay) * time.Millisecond) + defer stop() + for { + select { + case <-ctx.Done(): + // an externally-defined timeout wins the day + return nil, ctx.Err() + case <-timer.C: + // continue after our internal delay + } + + if p := findPlugin(); p != nil { + return p, nil + } + + if delay < maxDelay { + delay += delay + } + if delay > maxDelay { + delay = maxDelay + } + timer.Reset(time.Duration(delay) * time.Millisecond) + } +} + func (d *dynamicRegistry) DispensePlugin(ptype string, name string) (interface{}, error) { d.pluginsLock.Lock() defer d.pluginsLock.Unlock() diff --git a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go index 84ce6b40..802b5f71 100644 --- a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go +++ b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cgutil_linux.go @@ -8,6 +8,7 @@ import ( "path/filepath" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/opencontainers/runc/libcontainer/cgroups" lcc "github.com/opencontainers/runc/libcontainer/configs" @@ -145,3 +146,26 @@ func CopyCpuset(source, destination string) error { return nil } + +// MaybeDisableMemorySwappiness will disable memory swappiness, if that controller +// is available. Always the case for cgroups v2, but is not always the case on +// very old kernels with cgroups v1. 
+func MaybeDisableMemorySwappiness() *uint64 { + bypass := (*uint64)(nil) + zero := pointer.Of[uint64](0) + + // cgroups v2 always set zero + if UseV2 { + return zero + } + + // cgroups v1 detect if swappiness is supported by attempting to write to + // the nomad parent cgroup swappiness interface + e := &editor{fromRoot: "memory/nomad"} + err := e.write("memory.swappiness", "0") + if err != nil { + return bypass + } + + return zero +} diff --git a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go index f0ba00ad..1f02d15d 100644 --- a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go +++ b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v1.go @@ -4,6 +4,7 @@ package cgutil import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -59,8 +60,10 @@ func NewCpusetManagerV1(cgroupParent string, _ []uint16, logger hclog.Logger) Cp // ensure the reserved cpuset exists, but only copy the mems from the parent if creating the cgroup if err = os.Mkdir(filepath.Join(cgroupParentPath, ReservedCpusetCgroupName), 0755); err != nil { - logger.Warn("failed to ensure reserved cpuset.cpus interface exists; disable cpuset management", "error", err) - return new(NoopCpusetManager) + if !errors.Is(err, os.ErrExist) { + logger.Warn("failed to ensure reserved cpuset.cpus interface exists; disable cpuset management", "error", err) + return new(NoopCpusetManager) + } } if err = cgroups.WriteFile(filepath.Join(cgroupParentPath, ReservedCpusetCgroupName), "cpuset.mems", parentMems); err != nil { diff --git a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v2.go b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v2.go index 1157ad37..b6eeea6c 100644 --- a/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v2.go +++ b/vendor/github.com/hashicorp/nomad/client/lib/cgutil/cpuset_manager_v2.go @@ -322,7 +322,8 @@ func (c *cpusetManagerV2) write(id identity, set cpuset.CPUSet) { // set the cpuset value for the cgroup if err = m.Set(&configs.Resources{ - CpusetCpus: set.String(), + CpusetCpus: set.String(), + SkipDevices: true, }); err != nil { c.logger.Error("failed to set cgroup", "path", path, "error", err) return diff --git a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/instance.go b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/instance.go index 3839113d..4de770d4 100644 --- a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/instance.go +++ b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/instance.go @@ -91,7 +91,13 @@ func (i *instanceManager) setupVolumeManager() { case <-i.shutdownCtx.Done(): return case <-i.fp.hadFirstSuccessfulFingerprintCh: - i.volumeManager = newVolumeManager(i.logger, i.eventer, i.client, i.mountPoint, i.containerMountPoint, i.fp.requiresStaging) + + var externalID string + if i.fp.basicInfo != nil && i.fp.basicInfo.NodeInfo != nil { + externalID = i.fp.basicInfo.NodeInfo.ID + } + + i.volumeManager = newVolumeManager(i.logger, i.eventer, i.client, i.mountPoint, i.containerMountPoint, i.fp.requiresStaging, externalID) i.logger.Debug("volume manager setup complete") close(i.volumeManagerSetupCh) return diff --git a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/interface.go b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/interface.go index 9f7e067c..d6d90040 100644 --- 
a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/interface.go +++ b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/interface.go @@ -13,6 +13,16 @@ type MountInfo struct { IsDevice bool } +func (mi *MountInfo) Copy() *MountInfo { + if mi == nil { + return nil + } + + nmi := new(MountInfo) + *nmi = *mi + return nmi +} + type UsageOptions struct { ReadOnly bool AttachmentMode structs.CSIVolumeAttachmentMode @@ -43,12 +53,18 @@ func (u *UsageOptions) ToFS() string { type VolumeMounter interface { MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions, publishContext map[string]string) (*MountInfo, error) UnmountVolume(ctx context.Context, volID, remoteID, allocID string, usageOpts *UsageOptions) error + HasMount(ctx context.Context, mountInfo *MountInfo) (bool, error) + ExternalID() string } type Manager interface { // PluginManager returns a PluginManager for use by the node fingerprinter. PluginManager() pluginmanager.PluginManager + // WaitForPlugin waits for the plugin to become available, + // or until its context is canceled or times out. + WaitForPlugin(ctx context.Context, pluginType, pluginID string) error + // MounterForPlugin returns a VolumeMounter for the plugin ID associated // with the volume. Returns an error if this plugin isn't registered. MounterForPlugin(ctx context.Context, pluginID string) (VolumeMounter, error) diff --git a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go index 178b0fe1..59879e35 100644 --- a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go +++ b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/manager.go @@ -39,7 +39,7 @@ func New(config *Config) Manager { } return &csiManager{ - logger: config.Logger, + logger: config.Logger.Named("csi_manager"), eventer: config.TriggerNodeEvent, registry: config.DynamicRegistry, instances: make(map[string]map[string]*instanceManager), @@ -75,6 +75,21 @@ func (c *csiManager) PluginManager() pluginmanager.PluginManager { return c } +// WaitForPlugin waits for a specific plugin to be registered and available, +// unless the context is canceled, or it takes longer than a minute. 
+func (c *csiManager) WaitForPlugin(ctx context.Context, pType, pID string) error { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + p, err := c.registry.WaitForPlugin(ctx, pType, pID) + if err != nil { + return fmt.Errorf("%s plugin '%s' did not become ready: %w", pType, pID, err) + } + c.instancesLock.Lock() + defer c.instancesLock.Unlock() + c.ensureInstance(p) + return nil +} + func (c *csiManager) MounterForPlugin(ctx context.Context, pluginID string) (VolumeMounter, error) { c.instancesLock.RLock() defer c.instancesLock.RUnlock() diff --git a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/volume.go b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/volume.go index 4c6bf1d1..246a147f 100644 --- a/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/volume.go +++ b/vendor/github.com/hashicorp/nomad/client/pluginmanager/csimanager/volume.go @@ -50,9 +50,14 @@ type volumeManager struct { // requiresStaging shows whether the plugin requires that the volume manager // calls NodeStageVolume and NodeUnstageVolume RPCs during setup and teardown requiresStaging bool + + // externalNodeID is the identity of a given nomad client as observed by the + // storage provider (ex. a hostname, VM instance ID, etc.) + externalNodeID string } -func newVolumeManager(logger hclog.Logger, eventer TriggerNodeEvent, plugin csi.CSIPlugin, rootDir, containerRootDir string, requiresStaging bool) *volumeManager { +func newVolumeManager(logger hclog.Logger, eventer TriggerNodeEvent, plugin csi.CSIPlugin, rootDir, containerRootDir string, requiresStaging bool, externalID string) *volumeManager { + return &volumeManager{ logger: logger.Named("volume_manager"), eventer: eventer, @@ -61,6 +66,7 @@ func newVolumeManager(logger hclog.Logger, eventer TriggerNodeEvent, plugin csi. 
containerMountPoint: containerRootDir, requiresStaging: requiresStaging, usageTracker: newVolumeUsageTracker(), + externalNodeID: externalID, } } @@ -373,3 +379,13 @@ func (v *volumeManager) UnmountVolume(ctx context.Context, volID, remoteID, allo return err } + +func (v *volumeManager) ExternalID() string { + return v.externalNodeID +} + +func (v *volumeManager) HasMount(_ context.Context, mountInfo *MountInfo) (bool, error) { + m := mount.New() + isNotMount, err := m.IsNotAMountPoint(mountInfo.Source) + return !isNotMount, err +} diff --git a/vendor/github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state/state.go b/vendor/github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state/state.go deleted file mode 100644 index f37717c3..00000000 --- a/vendor/github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state/state.go +++ /dev/null @@ -1,11 +0,0 @@ -package state - -import pstructs "github.com/hashicorp/nomad/plugins/shared/structs" - -// PluginState is used to store the driver manager's state across restarts of the -// agent -type PluginState struct { - // ReattachConfigs are the set of reattach configs for plugins launched by - // the driver manager - ReattachConfigs map[string]*pstructs.ReattachConfig -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/address.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/address.go deleted file mode 100644 index 9ddb4774..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/address.go +++ /dev/null @@ -1,174 +0,0 @@ -package serviceregistration - -import ( - "fmt" - "strconv" - - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers" -) - -// GetAddress returns the IP (or custom advertise address) and port to use for a -// service or check registration. If no port label is specified (an empty value) -// and no custom address is specified, zero values are returned because no address -// could be resolved. -func GetAddress( - address, // custom address, if set - addressMode, - portLabel string, - networks structs.Networks, - driverNet *drivers.DriverNetwork, - ports structs.AllocatedPorts, - netStatus *structs.AllocNetworkStatus, -) (string, int, error) { - switch addressMode { - case structs.AddressModeAuto: - switch { - case address != "": - // No port no problem, just return the advertise address. - if portLabel == "" { - return address, 0, nil - } - // A custom advertise address can be used with a port map; using the - // Value and ignoring the IP. The routing from your custom address to - // the group network address is DIY. (e.g. EC2 public address) - if mapping, exists := ports.Get(portLabel); exists { - return address, mapping.Value, nil - } - // If not a port map we can interpolate a numeric port for you. - port, err := strconv.Atoi(portLabel) - if err != nil { - return "", 0, fmt.Errorf("invalid port: %q: not a valid port mapping or numeric port", portLabel) - } - return address, port, nil - case driverNet.Advertise(): - return GetAddress("", structs.AddressModeDriver, portLabel, networks, driverNet, ports, netStatus) - default: - return GetAddress("", structs.AddressModeHost, portLabel, networks, driverNet, ports, netStatus) - } - case structs.AddressModeHost: - // Cannot use address mode host with custom advertise address. 
- if address != "" { - return "", 0, fmt.Errorf("cannot use custom advertise address with %q address mode", structs.AddressModeHost) - } - - if portLabel == "" { - if len(networks) != 1 { - // If no networks are specified return zero - // values. Consul will advertise the host IP - // with no port. This is the pre-0.7.1 behavior - // some people rely on. - return "", 0, nil - } - return networks[0].IP, 0, nil - } - - // Default path: use host ip:port - // Try finding port in the AllocatedPorts struct first - // Check in Networks struct for backwards compatibility if not found - mapping, ok := ports.Get(portLabel) - if !ok { - mapping = networks.Port(portLabel) - if mapping.Value > 0 { - return mapping.HostIP, mapping.Value, nil - } - - // If port isn't a label, try to parse it as a literal port number - port, err := strconv.Atoi(portLabel) - if err != nil { - // Don't include Atoi error message as user likely - // never intended it to be a numeric and it creates a - // confusing error message - return "", 0, fmt.Errorf("invalid port %q: port label not found", portLabel) - } - if port <= 0 { - return "", 0, fmt.Errorf("invalid port: %q: port must be >0", portLabel) - } - - // A number was given which will use the Consul agent's address and the given port - // Returning a blank string as an address will use the Consul agent's address - return "", port, nil - } - return mapping.HostIP, mapping.Value, nil - - case structs.AddressModeDriver: - // Cannot use address mode driver with custom advertise address. - if address != "" { - return "", 0, fmt.Errorf("cannot use custom advertise address with %q address mode", structs.AddressModeDriver) - } - - // Require a driver network if driver address mode is used - if driverNet == nil { - return "", 0, fmt.Errorf(`cannot use address_mode="driver": no driver network exists`) - } - - // If no port label is specified just return the IP - if portLabel == "" { - return driverNet.IP, 0, nil - } - - // If the port is a label, use the driver's port (not the host's) - if port, ok := ports.Get(portLabel); ok { - return driverNet.IP, port.To, nil - } - - // Check if old style driver portmap is used - if port, ok := driverNet.PortMap[portLabel]; ok { - return driverNet.IP, port, nil - } - - // If port isn't a label, try to parse it as a literal port number - port, err := strconv.Atoi(portLabel) - if err != nil { - // Don't include Atoi error message as user likely - // never intended it to be a numeric and it creates a - // confusing error message - return "", 0, fmt.Errorf("invalid port label %q: port labels in driver address_mode must be numeric or in the driver's port map", portLabel) - } - if port <= 0 { - return "", 0, fmt.Errorf("invalid port: %q: port must be >0", portLabel) - } - - return driverNet.IP, port, nil - - case structs.AddressModeAlloc: - // Cannot use address mode alloc with custom advertise address. - if address != "" { - return "", 0, fmt.Errorf("cannot use custom advertise address with %q address mode", structs.AddressModeAlloc) - } - - // Going to need a network for this. 
- if netStatus == nil { - return "", 0, fmt.Errorf(`cannot use address_mode="alloc": no allocation network status reported`) - } - - // If no port label is specified just return the IP - if portLabel == "" { - return netStatus.Address, 0, nil - } - - // If port is a label and is found then return it - if port, ok := ports.Get(portLabel); ok { - // Use port.To value unless not set - if port.To > 0 { - return netStatus.Address, port.To, nil - } - return netStatus.Address, port.Value, nil - } - - // Check if port is a literal number - port, err := strconv.Atoi(portLabel) - if err != nil { - // User likely specified wrong port label here - return "", 0, fmt.Errorf("invalid port %q: port label not found or is not numeric", portLabel) - } - if port <= 0 { - return "", 0, fmt.Errorf("invalid port: %q: port must be >0", portLabel) - } - return netStatus.Address, port, nil - - default: - // Shouldn't happen due to validation, but enforce invariants - return "", 0, fmt.Errorf("invalid address mode %q", addressMode) - } -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go deleted file mode 100644 index ab9c35fb..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/client.go +++ /dev/null @@ -1,219 +0,0 @@ -package checks - -import ( - "bytes" - "context" - "fmt" - "io" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/client/serviceregistration" - "github.com/hashicorp/nomad/nomad/structs" - "oss.indeed.com/go/libtime" -) - -const ( - // maxTimeoutHTTP is a fail-safe value for the HTTP client, ensuring a Nomad - // Client does not leak goroutines hanging on to unresponsive endpoints. - maxTimeoutHTTP = 10 * time.Minute -) - -// Checker executes a check given an allocation-specific context, and produces -// a resulting structs.CheckQueryResult -type Checker interface { - Do(context.Context, *QueryContext, *Query) *structs.CheckQueryResult -} - -// New creates a new Checker capable of executing HTTP and TCP checks. 
-func New(log hclog.Logger) Checker { - httpClient := cleanhttp.DefaultPooledClient() - httpClient.Timeout = maxTimeoutHTTP - return &checker{ - log: log.Named("checks"), - httpClient: httpClient, - clock: libtime.SystemClock(), - } -} - -type checker struct { - log hclog.Logger - clock libtime.Clock - httpClient *http.Client -} - -func (c *checker) now() int64 { - return c.clock.Now().UTC().Unix() -} - -// Do will execute the Query given the QueryContext and produce a structs.CheckQueryResult -func (c *checker) Do(ctx context.Context, qc *QueryContext, q *Query) *structs.CheckQueryResult { - var qr *structs.CheckQueryResult - - timeout, cancel := context.WithTimeout(ctx, q.Timeout) - defer cancel() - - switch q.Type { - case "http": - qr = c.checkHTTP(timeout, qc, q) - default: - qr = c.checkTCP(timeout, qc, q) - } - - qr.ID = qc.ID - qr.Group = qc.Group - qr.Task = qc.Task - qr.Service = qc.Service - qr.Check = qc.Check - return qr -} - -// resolve the address to use when executing Query given a QueryContext -func address(qc *QueryContext, q *Query) (string, error) { - mode := q.AddressMode - if mode == "" { // determine resolution for check address - if qc.CustomAddress != "" { - // if the service is using a custom address, enable the check to - // inherit that custom address - mode = structs.AddressModeAuto - } else { - // otherwise a check defaults to the host address - mode = structs.AddressModeHost - } - } - - label := q.PortLabel - if label == "" { - label = qc.ServicePortLabel - } - - status := qc.NetworkStatus.NetworkStatus() - addr, port, err := serviceregistration.GetAddress( - qc.CustomAddress, // custom address - mode, // check address mode - label, // port label - qc.Networks, // allocation networks - nil, // driver network (not supported) - qc.Ports, // ports - status, // allocation network status - ) - if err != nil { - return "", err - } - if port > 0 { - addr = net.JoinHostPort(addr, strconv.Itoa(port)) - } - return addr, nil -} - -func (c *checker) checkTCP(ctx context.Context, qc *QueryContext, q *Query) *structs.CheckQueryResult { - qr := &structs.CheckQueryResult{ - Mode: q.Mode, - Timestamp: c.now(), - Status: structs.CheckPending, - } - - addr, err := address(qc, q) - if err != nil { - qr.Output = err.Error() - qr.Status = structs.CheckFailure - return qr - } - - if _, err = new(net.Dialer).DialContext(ctx, "tcp", addr); err != nil { - qr.Output = err.Error() - qr.Status = structs.CheckFailure - return qr - } - - qr.Output = "nomad: tcp ok" - qr.Status = structs.CheckSuccess - return qr -} - -func (c *checker) checkHTTP(ctx context.Context, qc *QueryContext, q *Query) *structs.CheckQueryResult { - qr := &structs.CheckQueryResult{ - Mode: q.Mode, - Timestamp: c.now(), - Status: structs.CheckPending, - } - - addr, err := address(qc, q) - if err != nil { - qr.Output = err.Error() - qr.Status = structs.CheckFailure - return qr - } - - u := (&url.URL{ - Scheme: q.Protocol, - Host: addr, - Path: q.Path, - }).String() - - request, err := http.NewRequest(q.Method, u, nil) - if err != nil { - qr.Output = fmt.Sprintf("nomad: %s", err.Error()) - qr.Status = structs.CheckFailure - return qr - } - for header, values := range q.Headers { - for _, value := range values { - request.Header.Add(header, value) - } - } - - request.Host = request.Header.Get("Host") - - request.Body = io.NopCloser(strings.NewReader(q.Body)) - request = request.WithContext(ctx) - - result, err := c.httpClient.Do(request) - if err != nil { - qr.Output = fmt.Sprintf("nomad: %s", err.Error()) - qr.Status = 
structs.CheckFailure - return qr - } - - // match the result status code to the http status code - qr.StatusCode = result.StatusCode - - switch { - case result.StatusCode == 200: - qr.Status = structs.CheckSuccess - qr.Output = "nomad: http ok" - return qr - case result.StatusCode < 400: - qr.Status = structs.CheckSuccess - default: - qr.Status = structs.CheckFailure - } - - // status code was not 200; read the response body and set that as the - // check result output content - qr.Output = limitRead(result.Body) - - return qr -} - -const ( - // outputSizeLimit is the maximum number of bytes to read and store of an http - // check output. Set to 3kb which fits in 1 page with room for other fields. - outputSizeLimit = 3 * 1024 -) - -func limitRead(r io.Reader) string { - b := make([]byte, 0, outputSizeLimit) - output := bytes.NewBuffer(b) - limited := io.LimitReader(r, outputSizeLimit) - if _, err := io.Copy(output, limited); err != nil { - return fmt.Sprintf("nomad: %s", err.Error()) - } - return output.String() -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/result.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/result.go deleted file mode 100644 index f7813288..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/checks/result.go +++ /dev/null @@ -1,108 +0,0 @@ -package checks - -import ( - "net/http" - "time" - - "github.com/hashicorp/nomad/nomad/structs" - "golang.org/x/exp/maps" -) - -// GetCheckQuery extracts the needed info from c to actually execute the check. -func GetCheckQuery(c *structs.ServiceCheck) *Query { - var protocol = c.Protocol // ensure appropriate default - if c.Type == "http" && protocol == "" { - protocol = "http" - } - return &Query{ - Mode: structs.GetCheckMode(c), - Type: c.Type, - Timeout: c.Timeout, - AddressMode: c.AddressMode, - PortLabel: c.PortLabel, - Protocol: protocol, - Path: c.Path, - Method: c.Method, - Headers: maps.Clone(c.Header), - Body: c.Body, - } -} - -// A Query is derived from a ServiceCheck and contains the minimal -// amount of information needed to actually execute that check. -type Query struct { - Mode structs.CheckMode // readiness or healthiness - Type string // tcp or http - - Timeout time.Duration // connection / request timeout - - AddressMode string // host, driver, or alloc - PortLabel string // label or value - - Protocol string // http checks only (http or https) - Path string // http checks only - Method string // http checks only - Headers http.Header // http checks only - Body string // http checks only -} - -// A QueryContext contains allocation and service parameters necessary for -// address resolution. -type QueryContext struct { - ID structs.CheckID - CustomAddress string - ServicePortLabel string - Networks structs.Networks - NetworkStatus structs.NetworkStatus - Ports structs.AllocatedPorts - - Group string - Task string - Service string - Check string -} - -// Stub creates a temporary QueryResult for the check of ID in the Pending state -// so we can represent the status of not being checked yet. 
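The checker above caps the stored output of an HTTP check at outputSizeLimit (3 KiB) with io.LimitReader. A minimal, self-contained sketch of that capping pattern follows; the helper name and sample sizes are illustrative, not taken from the vendored code.

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCapped copies at most limit bytes from r, mirroring the
// limitRead/outputSizeLimit pattern used for HTTP check output.
func readCapped(r io.Reader, limit int64) (string, error) {
	var sb strings.Builder
	if _, err := io.Copy(&sb, io.LimitReader(r, limit)); err != nil {
		return "", err
	}
	return sb.String(), nil
}

func main() {
	out, _ := readCapped(strings.NewReader(strings.Repeat("x", 5000)), 3*1024)
	fmt.Println(len(out)) // 3072: anything past the cap is dropped
}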
-func Stub( - id structs.CheckID, kind structs.CheckMode, now int64, - group, task, service, check string, -) *structs.CheckQueryResult { - return &structs.CheckQueryResult{ - ID: id, - Mode: kind, - Status: structs.CheckPending, - Output: "nomad: waiting to run", - Timestamp: now, - Group: group, - Task: task, - Service: service, - Check: check, - } -} - -// AllocationResults is a view of the check_id -> latest result for group and task -// checks in an allocation. -type AllocationResults map[structs.CheckID]*structs.CheckQueryResult - -// diff returns the set of IDs in ids that are not in m. -func (m AllocationResults) diff(ids []structs.CheckID) []structs.CheckID { - var missing []structs.CheckID - for _, id := range ids { - if _, exists := m[id]; !exists { - missing = append(missing, id) - } - } - return missing -} - -// ClientResults is a holistic view of alloc_id -> check_id -> latest result -// group and task checks across all allocations on a client. -type ClientResults map[string]AllocationResults - -func (cr ClientResults) Insert(allocID string, result *structs.CheckQueryResult) { - if _, exists := cr[allocID]; !exists { - cr[allocID] = make(AllocationResults) - } - cr[allocID][result.ID] = result -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/id.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/id.go deleted file mode 100644 index edcabd4c..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/id.go +++ /dev/null @@ -1,27 +0,0 @@ -package serviceregistration - -import ( - "fmt" - - "github.com/hashicorp/nomad/nomad/structs" -) - -const ( - // nomadServicePrefix is the prefix that scopes all Nomad registered - // services (both agent and task entries). - nomadServicePrefix = "_nomad" - - // nomadTaskPrefix is the prefix that scopes Nomad registered services - // for tasks. - nomadTaskPrefix = nomadServicePrefix + "-task-" -) - -// MakeAllocServiceID creates a unique ID for identifying an alloc service in -// a service registration provider. Both Nomad and Consul solutions use the -// same ID format to provide consistency. -// -// Example Service ID: _nomad-task-b4e61df9-b095-d64e-f241-23860da1375f-redis-http-http -func MakeAllocServiceID(allocID, taskName string, service *structs.Service) string { - return fmt.Sprintf("%s%s-%s-%s-%s", - nomadTaskPrefix, allocID, taskName, service.Name, service.PortLabel) -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go deleted file mode 100644 index e04aa459..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/service_registration.go +++ /dev/null @@ -1,156 +0,0 @@ -package serviceregistration - -import ( - "context" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/nomad/nomad/structs" - "golang.org/x/exp/maps" -) - -// Handler is the interface the Nomad Client uses to register, update and -// remove services and checks from service registration providers. Currently, -// Consul and Nomad are supported providers. -// -// When utilising Consul, the ACL "service:write" is required. It supports all -// functionality and is the OG/GOAT. -// -// When utilising Nomad, the client secret ID is used for authorisation. It -// currently supports service registrations only. -type Handler interface { - - // RegisterWorkload adds all service entries and checks to the backend - // provider. 
Whilst callers attempt to ensure WorkloadServices.Services is - // not empty before calling this function, implementations should also - // perform this. - RegisterWorkload(workload *WorkloadServices) error - - // RemoveWorkload all service entries and checks from the backend provider - // that are found within the passed WorkloadServices object. Whilst callers - // attempt to ensure WorkloadServices.Services is not empty before calling - // this function, implementations should also perform this. - RemoveWorkload(workload *WorkloadServices) - - // UpdateWorkload removes workload as specified by the old parameter, and - // adds workload as specified by the new parameter. Callers do not perform - // any deduplication on both objects, it is therefore the responsibility of - // the implementation. - UpdateWorkload(old, newTask *WorkloadServices) error - - // AllocRegistrations returns the registrations for the given allocation. - AllocRegistrations(allocID string) (*AllocRegistration, error) - - // UpdateTTL is used to update the TTL of an individual service - // registration check. - UpdateTTL(id, namespace, output, status string) error -} - -// WorkloadRestarter allows the checkWatcher to restart tasks or entire task -// groups. -type WorkloadRestarter interface { - Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error -} - -// AllocRegistration holds the status of services registered for a particular -// allocations by task. -type AllocRegistration struct { - // Tasks maps the name of a task to its registered services and checks. - Tasks map[string]*ServiceRegistrations -} - -// Copy performs a deep copy of the AllocRegistration object. -func (a *AllocRegistration) Copy() *AllocRegistration { - c := &AllocRegistration{ - Tasks: make(map[string]*ServiceRegistrations, len(a.Tasks)), - } - - for k, v := range a.Tasks { - c.Tasks[k] = v.copy() - } - - return c -} - -// NumServices returns the number of registered task AND group services. -// Group services are prefixed with "group-". -func (a *AllocRegistration) NumServices() int { - if a == nil { - return 0 - } - - total := 0 - for _, task := range a.Tasks { - total += len(task.Services) - } - - return total -} - -// NumChecks returns the number of registered checks from both task AND group -// services. Group services are prefixed with "group-". -func (a *AllocRegistration) NumChecks() int { - if a == nil { - return 0 - } - - total := 0 - for _, task := range a.Tasks { - for _, service := range task.Services { - total += len(service.Checks) - } - } - - return total -} - -// ServiceRegistrations holds the status of services registered for a -// particular task or task group. -type ServiceRegistrations struct { - // Services maps service_id -> service registration - Services map[string]*ServiceRegistration -} - -func (t *ServiceRegistrations) copy() *ServiceRegistrations { - c := &ServiceRegistrations{ - Services: make(map[string]*ServiceRegistration, len(t.Services)), - } - - for k, v := range t.Services { - c.Services[k] = v.copy() - } - - return c -} - -// ServiceRegistration holds the status of a registered Consul Service and its -// Checks. -type ServiceRegistration struct { - // serviceID and checkIDs are internal fields that track just the IDs of the - // services/checks registered in Consul. It is used to materialize the other - // fields when queried. - ServiceID string - CheckIDs map[string]struct{} // todo: use a Set? 
- - // CheckOnUpdate is a map of checkIDs and the associated OnUpdate value - // from the ServiceCheck It is used to determine how a reported checks - // status should be evaluated. - CheckOnUpdate map[string]string - - // Service is the AgentService registered in Consul. - Service *api.AgentService - - // Checks is the status of the registered checks. - Checks []*api.AgentCheck -} - -func (s *ServiceRegistration) copy() *ServiceRegistration { - // Copy does not copy the external fields but only the internal fields. - // This is so that the caller of AllocRegistrations can not access the - // internal fields and that method uses these fields to populate the - // external fields. - return &ServiceRegistration{ - ServiceID: s.ServiceID, - CheckIDs: maps.Clone(s.CheckIDs), - CheckOnUpdate: maps.Clone(s.CheckOnUpdate), - } -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/watcher.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/watcher.go deleted file mode 100644 index 4f92f00d..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/watcher.go +++ /dev/null @@ -1,326 +0,0 @@ -package serviceregistration - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-set" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" -) - -// composite of allocID + taskName for uniqueness -type key string - -type restarter struct { - allocID string - taskName string - checkID string - checkName string - taskKey key - - logger hclog.Logger - task WorkloadRestarter - grace time.Duration - interval time.Duration - timeLimit time.Duration - ignoreWarnings bool - - // unhealthyState is the time a check first went unhealthy. Set to the - // zero value if the check passes before timeLimit. - unhealthyState time.Time - - // graceUntil is when the check's grace period expires and unhealthy - // checks should be counted. - graceUntil time.Time -} - -// apply restart state for check and restart task if necessary. Current -// timestamp is passed in so all check updates have the same view of time (and -// to ease testing). -// -// Returns true if a restart was triggered in which case this check should be -// removed (checks are added on task startup). -func (r *restarter) apply(ctx context.Context, now time.Time, status string) bool { - healthy := func() { - if !r.unhealthyState.IsZero() { - r.logger.Debug("canceling restart because check became healthy") - r.unhealthyState = time.Time{} - } - } - switch status { - case "critical": // consul - case string(structs.CheckFailure): // nomad - case string(structs.CheckPending): // nomad - case "warning": // consul - if r.ignoreWarnings { - // Warnings are ignored, reset state and exit - healthy() - return false - } - default: - // All other statuses are ok, reset state and exit - healthy() - return false - } - - if now.Before(r.graceUntil) { - // In grace period, exit - return false - } - - if r.unhealthyState.IsZero() { - // First failure, set restart deadline - if r.timeLimit != 0 { - r.logger.Debug("check became unhealthy. Will restart if check doesn't become healthy", "time_limit", r.timeLimit) - } - r.unhealthyState = now - } - - // restart timeLimit after start of this check becoming unhealthy - restartAt := r.unhealthyState.Add(r.timeLimit) - - // Must test >= because if limit=1, restartAt == first failure - if now.Equal(restartAt) || now.After(restartAt) { - // hasn't become healthy by deadline, restart! 
- r.logger.Debug("restarting due to unhealthy check") - - // Tell TaskRunner to restart due to failure - reason := fmt.Sprintf("healthcheck: check %q unhealthy", r.checkName) - event := structs.NewTaskEvent(structs.TaskRestartSignal).SetRestartReason(reason) - go asyncRestart(ctx, r.logger, r.task, event) - return true - } - - return false -} - -// asyncRestart mimics the pre-0.9 TaskRunner.Restart behavior and is intended -// to be called in a goroutine. -func asyncRestart(ctx context.Context, logger hclog.Logger, task WorkloadRestarter, event *structs.TaskEvent) { - // Check watcher restarts are always failures - const failure = true - - // Restarting is asynchronous so there's no reason to allow this - // goroutine to block indefinitely. - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - if err := task.Restart(ctx, event, failure); err != nil { - // Restart errors are not actionable and only relevant when - // debugging allocation lifecycle management. - logger.Debug("failed to restart task", "error", err, "event_time", event.Time, "event_type", event.Type) - } -} - -// CheckStatusGetter is implemented per-provider. -type CheckStatusGetter interface { - // Get returns a map from CheckID -> (minimal) CheckStatus - Get() (map[string]string, error) -} - -// checkWatchUpdates add or remove checks from the watcher -type checkWatchUpdate struct { - checkID string - remove bool - restart *restarter -} - -// A CheckWatcher watches for check failures and restarts tasks according to -// their check_restart policy. -type CheckWatcher interface { - // Run the CheckWatcher. Maintains a background process to continuously - // monitor active checks. Must be called before Watch or Unwatch. Must be - // called as a goroutine. - Run(ctx context.Context) - - // Watch the given check. If the check status enters a failing state, the - // task associated with the check will be restarted according to its check_restart - // policy via wr. - Watch(allocID, taskName, checkID string, check *structs.ServiceCheck, wr WorkloadRestarter) - - // Unwatch will cause the CheckWatcher to no longer monitor the check of given checkID. - Unwatch(checkID string) -} - -// UniversalCheckWatcher is an implementation of CheckWatcher capable of watching -// checks in the Nomad or Consul service providers. -type UniversalCheckWatcher struct { - logger hclog.Logger - getter CheckStatusGetter - - // pollFrequency is how often to poll the checks API - pollFrequency time.Duration - - // checkUpdateCh sends watches/removals to the main loop - checkUpdateCh chan checkWatchUpdate - - // done is closed when Run has exited - done chan struct{} - - // failedPreviousInterval is used to indicate whether something went wrong during - // the previous poll interval - if so we can silence ongoing errors - failedPreviousInterval bool -} - -func NewCheckWatcher(logger hclog.Logger, getter CheckStatusGetter) *UniversalCheckWatcher { - return &UniversalCheckWatcher{ - logger: logger.ResetNamed("watch.checks"), - getter: getter, - pollFrequency: 1 * time.Second, - checkUpdateCh: make(chan checkWatchUpdate, 8), - done: make(chan struct{}), - } -} - -// Watch a check and restart its task if unhealthy. 
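The restarter above only fires once a check has stayed unhealthy for its timeLimit after the first failure, and never while the grace window is still open. A small, self-contained sketch of that deadline arithmetic, with hypothetical settings (timeLimit is normally derived from the check interval and restart limit when the watch is registered, see Watch below):

package main

import (
	"fmt"
	"time"
)

// shouldRestart mirrors the deadline test in restarter.apply: restart once
// "now" reaches firstFailure+timeLimit, but never while still in grace.
func shouldRestart(now, firstFailure, graceUntil time.Time, timeLimit time.Duration) bool {
	if now.Before(graceUntil) {
		return false
	}
	restartAt := firstFailure.Add(timeLimit)
	return !now.Before(restartAt) // now >= restartAt
}

func main() {
	start := time.Date(2024, 4, 19, 12, 0, 0, 0, time.UTC)
	graceUntil := start.Add(5 * time.Second) // hypothetical grace period
	firstFail := start.Add(10 * time.Second) // first unhealthy poll
	timeLimit := 20 * time.Second            // hypothetical time limit

	fmt.Println(shouldRestart(firstFail.Add(19*time.Second), firstFail, graceUntil, timeLimit)) // false
	fmt.Println(shouldRestart(firstFail.Add(20*time.Second), firstFail, graceUntil, timeLimit)) // true
}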
-func (w *UniversalCheckWatcher) Watch(allocID, taskName, checkID string, check *structs.ServiceCheck, wr WorkloadRestarter) { - if !check.TriggersRestarts() { - return // check_restart not set; no-op - } - - c := &restarter{ - allocID: allocID, - taskName: taskName, - checkID: checkID, - checkName: check.Name, - taskKey: key(allocID + taskName), - task: wr, - interval: check.Interval, - grace: check.CheckRestart.Grace, - graceUntil: time.Now().Add(check.CheckRestart.Grace), - timeLimit: check.Interval * time.Duration(check.CheckRestart.Limit-1), - ignoreWarnings: check.CheckRestart.IgnoreWarnings, - logger: w.logger.With("alloc_id", allocID, "task", taskName, "check", check.Name), - } - - select { - case w.checkUpdateCh <- checkWatchUpdate{ - checkID: checkID, - restart: c, - }: // activate watch - case <-w.done: // exited; nothing to do - } -} - -// Unwatch a check. -func (w *UniversalCheckWatcher) Unwatch(checkID string) { - select { - case w.checkUpdateCh <- checkWatchUpdate{ - checkID: checkID, - remove: true, - }: // deactivate watch - case <-w.done: // exited; nothing to do - } -} - -func (w *UniversalCheckWatcher) Run(ctx context.Context) { - defer close(w.done) - - // map of checkID to their restarter handle (contains only checks we are watching) - watched := make(map[string]*restarter) - - checkTimer, cleanupCheckTimer := helper.NewSafeTimer(0) - defer cleanupCheckTimer() - - stopCheckTimer := func() { // todo: refactor using that other pattern - checkTimer.Stop() - select { - case <-checkTimer.C: - default: - } - } - - // initialize with checkTimer disabled - stopCheckTimer() - - for { - // disable polling if there are no checks - if len(watched) == 0 { - stopCheckTimer() - } - - select { - // caller cancelled us; goodbye - case <-ctx.Done(): - return - - // received an update; add or remove check - case update := <-w.checkUpdateCh: - if update.remove { - delete(watched, update.checkID) - continue - } - - watched[update.checkID] = update.restart - allocID := update.restart.allocID - taskName := update.restart.taskName - checkName := update.restart.checkName - w.logger.Trace("now watching check", "alloc_i", allocID, "task", taskName, "check", checkName) - - // turn on the timer if we are now active - if len(watched) == 1 { - stopCheckTimer() - checkTimer.Reset(w.pollFrequency) - } - - // poll time; refresh check statuses - case now := <-checkTimer.C: - w.interval(ctx, now, watched) - checkTimer.Reset(w.pollFrequency) - } - } -} - -func (w *UniversalCheckWatcher) interval(ctx context.Context, now time.Time, watched map[string]*restarter) { - statuses, err := w.getter.Get() - if err != nil && !w.failedPreviousInterval { - w.failedPreviousInterval = true - w.logger.Error("failed to retrieve check statuses", "error", err) - return - } - w.failedPreviousInterval = false - - // keep track of tasks restarted this interval - restarts := set.New[key](len(statuses)) - - // iterate over status of all checks, and update the status of checks - // we care about watching - for checkID, checkRestarter := range watched { - if ctx.Err() != nil { - return // short circuit; caller cancelled us - } - - if restarts.Contains(checkRestarter.taskKey) { - // skip; task is already being restarted - delete(watched, checkID) - continue - } - - status, exists := statuses[checkID] - if !exists { - // warn only if outside grace period; avoiding race with check registration - if now.After(checkRestarter.graceUntil) { - w.logger.Warn("watched check not found", "check_id", checkID) - } - continue - } - - if 
checkRestarter.apply(ctx, now, status) { - // check will be re-registered & re-watched on startup - delete(watched, checkID) - restarts.Insert(checkRestarter.taskKey) - } - } - - // purge passing checks of tasks that are being restarted - if restarts.Size() > 0 { - for checkID, checkRestarter := range watched { - if restarts.Contains(checkRestarter.taskKey) { - delete(watched, checkID) - } - } - } -} diff --git a/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go b/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go deleted file mode 100644 index f752cd22..00000000 --- a/vendor/github.com/hashicorp/nomad/client/serviceregistration/workload.go +++ /dev/null @@ -1,83 +0,0 @@ -package serviceregistration - -import ( - "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers" -) - -// WorkloadServices describes services defined in either a Task or TaskGroup -// that need to be syncronized with a service registration provider. -type WorkloadServices struct { - AllocInfo structs.AllocInfo - - // Canary indicates whether, or not the allocation is a canary. This is - // used to build the correct tags mapping. - Canary bool - - // ProviderNamespace is the provider namespace in which services will be - // registered, if the provider supports this functionality. - ProviderNamespace string - - // Restarter allows restarting the task or task group depending on the - // check_restart blocks. - Restarter WorkloadRestarter - - // Services and checks to register for the task. - Services []*structs.Service - - // Networks from the task's resources block. - // TODO: remove and use Ports - Networks structs.Networks - - // NetworkStatus from alloc if network namespace is created. - // Can be nil. - NetworkStatus *structs.AllocNetworkStatus - - // AllocatedPorts is the list of port mappings. - Ports structs.AllocatedPorts - - // DriverExec is the script executor for the task's driver. For group - // services this is nil and script execution is managed by a tasklet in the - // taskrunner script_check_hook. - DriverExec interfaces.ScriptExecutor - - // DriverNetwork is the network specified by the driver and may be nil. - DriverNetwork *drivers.DriverNetwork -} - -// RegistrationProvider identifies the service registration provider for the -// WorkloadServices. -func (ws *WorkloadServices) RegistrationProvider() string { - - // Protect against an empty array; it would be embarrassing to panic here. - if len(ws.Services) == 0 { - return "" - } - - // Note(jrasell): a Nomad task group can only currently utilise a single - // service provider for all services included within it. In the event we - // remove this restriction, this will need to change along which a lot of - // other logic. - return ws.Services[0].Provider -} - -// Copy method for easing tests. 
-func (ws *WorkloadServices) Copy() *WorkloadServices { - newTS := new(WorkloadServices) - *newTS = *ws - - // Deep copy Services - newTS.Services = make([]*structs.Service, len(ws.Services)) - for i := range ws.Services { - newTS.Services[i] = ws.Services[i].Copy() - } - return newTS -} - -func (ws *WorkloadServices) Name() string { - if ws.AllocInfo.Task != "" { - return ws.AllocInfo.Task - } - return "group-" + ws.AllocInfo.Group -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/08types.go b/vendor/github.com/hashicorp/nomad/client/state/08types.go deleted file mode 100644 index b4cc54bf..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/08types.go +++ /dev/null @@ -1,131 +0,0 @@ -package state - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers" - pstructs "github.com/hashicorp/nomad/plugins/shared/structs" -) - -// allocRunnerMutableState08 is state that had to be written on each save as it -// changed over the life-cycle of the alloc_runner in Nomad 0.8. -// -// https://github.com/hashicorp/nomad/blob/v0.8.6/client/alloc_runner.go#L146-L153 -type allocRunnerMutableState08 struct { - // AllocClientStatus does not need to be upgraded as it is computed - // from task states. - AllocClientStatus string - - // AllocClientDescription does not need to be upgraded as it is computed - // from task states. - AllocClientDescription string - - TaskStates map[string]*structs.TaskState - DeploymentStatus *structs.AllocDeploymentStatus -} - -// taskRunnerState08 was used to snapshot the state of the task runner in Nomad -// 0.8. -// -// https://github.com/hashicorp/nomad/blob/v0.8.6/client/task_runner.go#L188-L197 -// COMPAT(0.10): Allows upgrading from 0.8.X to 0.9.0. -type taskRunnerState08 struct { - Version string - HandleID string - ArtifactDownloaded bool - TaskDirBuilt bool - PayloadRendered bool - DriverNetwork *drivers.DriverNetwork - // Created Resources are no longer used. 
- //CreatedResources *driver.CreatedResources -} - -type TaskRunnerHandle08 struct { - // Docker specific handle info - ContainerID string `json:"ContainerID"` - Image string `json:"Image"` - - // LXC specific handle info - ContainerName string `json:"ContainerName"` - LxcPath string `json:"LxcPath"` - - // Executor reattach config - PluginConfig struct { - Pid int `json:"Pid"` - AddrNet string `json:"AddrNet"` - AddrName string `json:"AddrName"` - } `json:"PluginConfig"` -} - -func (t *TaskRunnerHandle08) ReattachConfig() *pstructs.ReattachConfig { - return &pstructs.ReattachConfig{ - Network: t.PluginConfig.AddrNet, - Addr: t.PluginConfig.AddrName, - Pid: t.PluginConfig.Pid, - } -} - -func (t *taskRunnerState08) Upgrade(allocID, taskName string) (*state.LocalState, error) { - ls := state.NewLocalState() - - // Reuse DriverNetwork - ls.DriverNetwork = t.DriverNetwork - - // Upgrade artifact state - ls.Hooks["artifacts"] = &state.HookState{ - PrestartDone: t.ArtifactDownloaded, - } - - // Upgrade task dir state - ls.Hooks["task_dir"] = &state.HookState{ - Data: map[string]string{ - // "is_done" is equivalent to task_dir_hook.TaskDirHookIsDoneKey - // Does not import to avoid import cycle - "is_done": fmt.Sprintf("%v", t.TaskDirBuilt), - }, - } - - // Upgrade dispatch payload state - ls.Hooks["dispatch_payload"] = &state.HookState{ - PrestartDone: t.PayloadRendered, - } - - // Add necessary fields to TaskConfig - ls.TaskHandle = drivers.NewTaskHandle(drivers.Pre09TaskHandleVersion) - ls.TaskHandle.Config = &drivers.TaskConfig{ - ID: fmt.Sprintf("pre09-%s", uuid.Generate()), - Name: taskName, - AllocID: allocID, - } - - ls.TaskHandle.State = drivers.TaskStateUnknown - - // The docker driver prefixed the handle with 'DOCKER:' - // Strip so that it can be unmarshalled - data := strings.TrimPrefix(t.HandleID, "DOCKER:") - - // The pre09 driver handle ID is given to the driver. It is unmarshalled - // here to check for errors - if _, err := UnmarshalPre09HandleID([]byte(data)); err != nil { - return nil, err - } - - ls.TaskHandle.DriverState = []byte(data) - - return ls, nil -} - -// UnmarshalPre09HandleID decodes the pre09 json encoded handle ID -func UnmarshalPre09HandleID(raw []byte) (*TaskRunnerHandle08, error) { - var handle TaskRunnerHandle08 - if err := json.Unmarshal(raw, &handle); err != nil { - return nil, fmt.Errorf("failed to decode 0.8 driver state: %v", err) - } - - return &handle, nil -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/12types.go b/vendor/github.com/hashicorp/nomad/client/state/12types.go deleted file mode 100644 index bc93c258..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/12types.go +++ /dev/null @@ -1,9 +0,0 @@ -package state - -import "github.com/hashicorp/nomad/client/dynamicplugins" - -// RegistryState12 is the dynamic plugin registry state persisted -// before 1.3.0. 
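The 0.8-to-0.9 upgrade path above strips the docker driver's "DOCKER:" prefix from the stored handle ID and then JSON-decodes it. A stripped-down sketch of that decode step, using a hypothetical payload and a reduced stand-in for TaskRunnerHandle08:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// pre09Handle keeps only the executor reattach fields for the sketch.
type pre09Handle struct {
	PluginConfig struct {
		Pid      int    `json:"Pid"`
		AddrNet  string `json:"AddrNet"`
		AddrName string `json:"AddrName"`
	} `json:"PluginConfig"`
}

func main() {
	// The docker driver prefixed its handle with "DOCKER:"; strip it before decoding.
	raw := `DOCKER:{"PluginConfig":{"Pid":1234,"AddrNet":"unix","AddrName":"/tmp/plugin.sock"}}`
	var h pre09Handle
	if err := json.Unmarshal([]byte(strings.TrimPrefix(raw, "DOCKER:")), &h); err != nil {
		fmt.Println("failed to decode 0.8 driver state:", err)
		return
	}
	fmt.Println(h.PluginConfig.Pid, h.PluginConfig.AddrNet, h.PluginConfig.AddrName)
}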
-type RegistryState12 struct { - Plugins map[string]map[string]*dynamicplugins.PluginInfo -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/db_bolt.go b/vendor/github.com/hashicorp/nomad/client/state/db_bolt.go deleted file mode 100644 index 1be688a1..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/db_bolt.go +++ /dev/null @@ -1,869 +0,0 @@ -package state - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "time" - - hclog "github.com/hashicorp/go-hclog" - trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" - dmstate "github.com/hashicorp/nomad/client/devicemanager/state" - "github.com/hashicorp/nomad/client/dynamicplugins" - driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" - "github.com/hashicorp/nomad/client/serviceregistration/checks" - "github.com/hashicorp/nomad/helper/boltdd" - "github.com/hashicorp/nomad/nomad/structs" - "go.etcd.io/bbolt" -) - -/* -The client has a boltDB backed state store. The schema as of 0.9 looks as follows: - -meta/ -|--> version -> '2' (not msgpack encoded) -|--> upgraded -> time.Now().Format(timeRFC3339) -allocations/ -|--> / - |--> alloc -> allocEntry{*structs.Allocation} - |--> deploy_status -> deployStatusEntry{*structs.AllocDeploymentStatus} - |--> network_status -> networkStatusEntry{*structs.AllocNetworkStatus} - |--> task-/ - |--> local_state -> *trstate.LocalState # Local-only state - |--> task_state -> *structs.TaskState # Syncs to servers - |--> checks/ - |--> check- -> *structs.CheckState # Syncs to servers - -devicemanager/ -|--> plugin_state -> *dmstate.PluginState - -drivermanager/ -|--> plugin_state -> *driverstate.PluginState - -dynamicplugins/ -|--> registry_state -> *dynamicplugins.RegistryState -*/ - -var ( - // metaBucketName is the name of the metadata bucket - metaBucketName = []byte("meta") - - // metaVersionKey is the key the state schema version is stored under. - metaVersionKey = []byte("version") - - // metaVersion is the value of the state schema version to detect when - // an upgrade is needed. It skips the usual boltdd/msgpack backend to - // be as portable and futureproof as possible. - metaVersion = []byte{'3'} - - // metaUpgradedKey is the key that stores the timestamp of the last - // time the schema was upgraded. - metaUpgradedKey = []byte("upgraded") - - // allocationsBucketName is the bucket name containing all allocation related - // data - allocationsBucketName = []byte("allocations") - - // allocKey is the key Allocations are stored under encapsulated in - // allocEntry structs. - allocKey = []byte("alloc") - - // allocDeployStatusKey is the key *structs.AllocDeploymentStatus is - // stored under. 
- allocDeployStatusKey = []byte("deploy_status") - - // allocNetworkStatusKey is the key *structs.AllocNetworkStatus is - // stored under - allocNetworkStatusKey = []byte("network_status") - - // checkResultsBucket is the bucket name in which check query results are stored - checkResultsBucket = []byte("check_results") - - // allocations -> $allocid -> task-$taskname -> the keys below - taskLocalStateKey = []byte("local_state") - taskStateKey = []byte("task_state") - - // devManagerBucket is the bucket name containing all device manager related - // data - devManagerBucket = []byte("devicemanager") - - // driverManagerBucket is the bucket name containing all driver manager - // related data - driverManagerBucket = []byte("drivermanager") - - // managerPluginStateKey is the key by which plugin manager plugin state is - // stored at - managerPluginStateKey = []byte("plugin_state") - - // dynamicPluginBucketName is the bucket name containing all dynamic plugin - // registry data. each dynamic plugin registry will have its own subbucket. - dynamicPluginBucketName = []byte("dynamicplugins") - - // registryStateKey is the key at which dynamic plugin registry state is stored - registryStateKey = []byte("registry_state") -) - -// taskBucketName returns the bucket name for the given task name. -func taskBucketName(taskName string) []byte { - return []byte("task-" + taskName) -} - -// NewStateDBFunc creates a StateDB given a state directory. -type NewStateDBFunc func(logger hclog.Logger, stateDir string) (StateDB, error) - -// GetStateDBFactory returns a func for creating a StateDB -func GetStateDBFactory(devMode bool) NewStateDBFunc { - // Return a noop state db implementation when in debug mode - if devMode { - return func(hclog.Logger, string) (StateDB, error) { - return NoopDB{}, nil - } - } - - return NewBoltStateDB -} - -// BoltStateDB persists and restores Nomad client state in a boltdb. All -// methods are safe for concurrent access. -type BoltStateDB struct { - stateDir string - db *boltdd.DB - logger hclog.Logger -} - -// NewBoltStateDB creates or opens an existing boltdb state file or returns an -// error. -func NewBoltStateDB(logger hclog.Logger, stateDir string) (StateDB, error) { - fn := filepath.Join(stateDir, "state.db") - - // Check to see if the DB already exists - fi, err := os.Stat(fn) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - firstRun := fi == nil - - // Timeout to force failure when accessing a data dir that is already in use - timeout := &bbolt.Options{Timeout: 5 * time.Second} - - // Create or open the boltdb state database - db, err := boltdd.Open(fn, 0600, timeout) - if err == bbolt.ErrTimeout { - return nil, fmt.Errorf("timed out while opening database, is another Nomad process accessing data_dir %s?", stateDir) - } else if err != nil { - return nil, fmt.Errorf("failed to create state database: %v", err) - } - - sdb := &BoltStateDB{ - stateDir: stateDir, - db: db, - logger: logger, - } - - // If db did not already exist, initialize metadata fields - if firstRun { - if err := sdb.init(); err != nil { - return nil, err - } - } - - return sdb, nil -} - -func (s *BoltStateDB) Name() string { - return "boltdb" -} - -// GetAllAllocations gets all allocations persisted by this client and returns -// a map of alloc ids to errors for any allocations that could not be restored. -// -// If a fatal error was encountered it will be returned and the other two -// values will be nil. 
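The schema comment above lays out nested buckets such as allocations/<alloc_id>/task-<task_name>/, and NewBoltStateDB opens the file with a 5-second lock timeout so a data_dir already held by another process fails fast instead of hanging. A rough sketch of both ideas using go.etcd.io/bbolt directly (Nomad goes through its boltdd/msgpack wrapper, so the real key encoding differs; the path and IDs here are made up):

package main

import (
	"errors"
	"fmt"
	"path/filepath"
	"time"

	"go.etcd.io/bbolt"
)

func main() {
	// Mirror NewBoltStateDB: fail fast if another process holds the file lock.
	fn := filepath.Join(".", "state.db")
	db, err := bbolt.Open(fn, 0600, &bbolt.Options{Timeout: 5 * time.Second})
	if errors.Is(err, bbolt.ErrTimeout) {
		fmt.Println("state.db is locked; is another process using this data_dir?")
		return
	} else if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer db.Close()

	// Nested-bucket layout from the schema comment:
	// allocations/<alloc_id>/task-<task_name>/task_state
	err = db.Update(func(tx *bbolt.Tx) error {
		allocs, err := tx.CreateBucketIfNotExists([]byte("allocations"))
		if err != nil {
			return err
		}
		alloc, err := allocs.CreateBucketIfNotExists([]byte("b4e61df9-b095-d64e"))
		if err != nil {
			return err
		}
		task, err := alloc.CreateBucketIfNotExists([]byte("task-redis"))
		if err != nil {
			return err
		}
		// Real entries are msgpack-encoded structs; a plain value keeps the sketch small.
		return task.Put([]byte("task_state"), []byte("running"))
	})
	fmt.Println(err)
}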
-func (s *BoltStateDB) GetAllAllocations() ([]*structs.Allocation, map[string]error, error) { - var allocs []*structs.Allocation - var errs map[string]error - - err := s.db.View(func(tx *boltdd.Tx) error { - allocs, errs = s.getAllAllocations(tx) - return nil - }) - - // db.View itself may return an error, so still check - if err != nil { - return nil, nil, err - } - - return allocs, errs, nil -} - -// allocEntry wraps values in the Allocations buckets -type allocEntry struct { - Alloc *structs.Allocation -} - -func (s *BoltStateDB) getAllAllocations(tx *boltdd.Tx) ([]*structs.Allocation, map[string]error) { - allocs := make([]*structs.Allocation, 0, 10) - errs := map[string]error{} - - allocationsBkt := tx.Bucket(allocationsBucketName) - if allocationsBkt == nil { - // No allocs - return allocs, errs - } - - // Create a cursor for iteration. - c := allocationsBkt.BoltBucket().Cursor() - - // Iterate over all the allocation buckets - for k, _ := c.First(); k != nil; k, _ = c.Next() { - allocID := string(k) - allocBkt := allocationsBkt.Bucket(k) - if allocBkt == nil { - errs[allocID] = fmt.Errorf("missing alloc bucket") - continue - } - - var ae allocEntry - if err := allocBkt.Get(allocKey, &ae); err != nil { - errs[allocID] = fmt.Errorf("failed to decode alloc: %v", err) - continue - } - - // Handle upgrade path - ae.Alloc.Canonicalize() - ae.Alloc.Job.Canonicalize() - - allocs = append(allocs, ae.Alloc) - } - - return allocs, errs -} - -// PutAllocation stores an allocation or returns an error. -func (s *BoltStateDB) PutAllocation(alloc *structs.Allocation, opts ...WriteOption) error { - return s.updateWithOptions(opts, func(tx *boltdd.Tx) error { - // Retrieve the root allocations bucket - allocsBkt, err := tx.CreateBucketIfNotExists(allocationsBucketName) - if err != nil { - return err - } - - // Retrieve the specific allocations bucket - key := []byte(alloc.ID) - allocBkt, err := allocsBkt.CreateBucketIfNotExists(key) - if err != nil { - return err - } - - allocState := allocEntry{ - Alloc: alloc, - } - return allocBkt.Put(allocKey, &allocState) - }) -} - -// deployStatusEntry wraps values for DeploymentStatus keys. -type deployStatusEntry struct { - DeploymentStatus *structs.AllocDeploymentStatus -} - -// PutDeploymentStatus stores an allocation's DeploymentStatus or returns an -// error. -func (s *BoltStateDB) PutDeploymentStatus(allocID string, ds *structs.AllocDeploymentStatus) error { - return s.db.Update(func(tx *boltdd.Tx) error { - return putDeploymentStatusImpl(tx, allocID, ds) - }) -} - -func putDeploymentStatusImpl(tx *boltdd.Tx, allocID string, ds *structs.AllocDeploymentStatus) error { - allocBkt, err := getAllocationBucket(tx, allocID) - if err != nil { - return err - } - - entry := deployStatusEntry{ - DeploymentStatus: ds, - } - return allocBkt.Put(allocDeployStatusKey, &entry) -} - -// GetDeploymentStatus retrieves an allocation's DeploymentStatus or returns an -// error. 
-func (s *BoltStateDB) GetDeploymentStatus(allocID string) (*structs.AllocDeploymentStatus, error) { - var entry deployStatusEntry - - err := s.db.View(func(tx *boltdd.Tx) error { - allAllocsBkt := tx.Bucket(allocationsBucketName) - if allAllocsBkt == nil { - // No state, return - return nil - } - - allocBkt := allAllocsBkt.Bucket([]byte(allocID)) - if allocBkt == nil { - // No state for alloc, return - return nil - } - - return allocBkt.Get(allocDeployStatusKey, &entry) - }) - - // It's valid for this field to be nil/missing - if boltdd.IsErrNotFound(err) { - return nil, nil - } - - if err != nil { - return nil, err - } - - return entry.DeploymentStatus, nil -} - -// networkStatusEntry wraps values for NetworkStatus keys. -type networkStatusEntry struct { - NetworkStatus *structs.AllocNetworkStatus -} - -// PutNetworkStatus stores an allocation's DeploymentStatus or returns an -// error. -func (s *BoltStateDB) PutNetworkStatus(allocID string, ds *structs.AllocNetworkStatus, opts ...WriteOption) error { - return s.updateWithOptions(opts, func(tx *boltdd.Tx) error { - return putNetworkStatusImpl(tx, allocID, ds) - }) -} - -func putNetworkStatusImpl(tx *boltdd.Tx, allocID string, ds *structs.AllocNetworkStatus) error { - allocBkt, err := getAllocationBucket(tx, allocID) - if err != nil { - return err - } - - entry := networkStatusEntry{ - NetworkStatus: ds, - } - return allocBkt.Put(allocNetworkStatusKey, &entry) -} - -// GetNetworkStatus retrieves an allocation's NetworkStatus or returns an -// error. -func (s *BoltStateDB) GetNetworkStatus(allocID string) (*structs.AllocNetworkStatus, error) { - var entry networkStatusEntry - - err := s.db.View(func(tx *boltdd.Tx) error { - allAllocsBkt := tx.Bucket(allocationsBucketName) - if allAllocsBkt == nil { - // No state, return - return nil - } - - allocBkt := allAllocsBkt.Bucket([]byte(allocID)) - if allocBkt == nil { - // No state for alloc, return - return nil - } - - return allocBkt.Get(allocNetworkStatusKey, &entry) - }) - - // It's valid for this field to be nil/missing - if boltdd.IsErrNotFound(err) { - return nil, nil - } - - if err != nil { - return nil, err - } - - return entry.NetworkStatus, nil -} - -// GetTaskRunnerState returns the LocalState and TaskState for a -// TaskRunner. LocalState or TaskState will be nil if they do not exist. -// -// If an error is encountered both LocalState and TaskState will be nil. 
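A missing key is not an error for these getters: boltdd's not-found result is translated into a nil status with no error. A small, generic sketch of that convention, using a map and a stand-in sentinel error rather than the real boltdd API:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for boltdd's not-found error; the point is the
// translation "missing key => nil value, no error" used by the getters above.
var errNotFound = errors.New("not found")

func lookup(store map[string]string, key string) (string, error) {
	v, ok := store[key]
	if !ok {
		return "", errNotFound
	}
	return v, nil
}

// getOptional mirrors GetDeploymentStatus/GetNetworkStatus: absence is valid.
func getOptional(store map[string]string, key string) (*string, error) {
	v, err := lookup(store, key)
	if errors.Is(err, errNotFound) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &v, nil
}

func main() {
	store := map[string]string{"deploy_status": "healthy"}
	got, _ := getOptional(store, "network_status")
	fmt.Println(got == nil) // true: no entry yet, but not an error
}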
-func (s *BoltStateDB) GetTaskRunnerState(allocID, taskName string) (*trstate.LocalState, *structs.TaskState, error) { - var ls *trstate.LocalState - var ts *structs.TaskState - - err := s.db.View(func(tx *boltdd.Tx) error { - allAllocsBkt := tx.Bucket(allocationsBucketName) - if allAllocsBkt == nil { - // No state, return - return nil - } - - allocBkt := allAllocsBkt.Bucket([]byte(allocID)) - if allocBkt == nil { - // No state for alloc, return - return nil - } - - taskBkt := allocBkt.Bucket(taskBucketName(taskName)) - if taskBkt == nil { - // No state for task, return - return nil - } - - // Restore Local State if it exists - ls = &trstate.LocalState{} - if err := taskBkt.Get(taskLocalStateKey, ls); err != nil { - if !boltdd.IsErrNotFound(err) { - return fmt.Errorf("failed to read local task runner state: %v", err) - } - - // Key not found, reset ls to nil - ls = nil - } - - // Restore Task State if it exists - ts = &structs.TaskState{} - if err := taskBkt.Get(taskStateKey, ts); err != nil { - if !boltdd.IsErrNotFound(err) { - return fmt.Errorf("failed to read task state: %v", err) - } - - // Key not found, reset ts to nil - ts = nil - } - - return nil - }) - - if err != nil { - return nil, nil, err - } - - return ls, ts, nil -} - -// PutTaskRunnerLocalState stores TaskRunner's LocalState or returns an error. -func (s *BoltStateDB) PutTaskRunnerLocalState(allocID, taskName string, val *trstate.LocalState) error { - return s.db.Update(func(tx *boltdd.Tx) error { - return putTaskRunnerLocalStateImpl(tx, allocID, taskName, val) - }) -} - -// putTaskRunnerLocalStateImpl stores TaskRunner's LocalState in an ongoing -// transaction or returns an error. -func putTaskRunnerLocalStateImpl(tx *boltdd.Tx, allocID, taskName string, val *trstate.LocalState) error { - taskBkt, err := getTaskBucket(tx, allocID, taskName) - if err != nil { - return fmt.Errorf("failed to retrieve allocation bucket: %v", err) - } - - if err := taskBkt.Put(taskLocalStateKey, val); err != nil { - return fmt.Errorf("failed to write task_runner state: %v", err) - } - - return nil -} - -// PutTaskState stores a task's state or returns an error. -func (s *BoltStateDB) PutTaskState(allocID, taskName string, state *structs.TaskState) error { - return s.db.Update(func(tx *boltdd.Tx) error { - return putTaskStateImpl(tx, allocID, taskName, state) - }) -} - -// putTaskStateImpl stores a task's state in an ongoing transaction or returns -// an error. -func putTaskStateImpl(tx *boltdd.Tx, allocID, taskName string, state *structs.TaskState) error { - taskBkt, err := getTaskBucket(tx, allocID, taskName) - if err != nil { - return fmt.Errorf("failed to retrieve allocation bucket: %v", err) - } - - return taskBkt.Put(taskStateKey, state) -} - -// DeleteTaskBucket is used to delete a task bucket if it exists. -func (s *BoltStateDB) DeleteTaskBucket(allocID, taskName string) error { - return s.db.Update(func(tx *boltdd.Tx) error { - // Retrieve the root allocations bucket - allocations := tx.Bucket(allocationsBucketName) - if allocations == nil { - return nil - } - - // Retrieve the specific allocations bucket - alloc := allocations.Bucket([]byte(allocID)) - if alloc == nil { - return nil - } - - // Check if the bucket exists - key := taskBucketName(taskName) - return alloc.DeleteBucket(key) - }) -} - -// DeleteAllocationBucket is used to delete an allocation bucket if it exists. 
-func (s *BoltStateDB) DeleteAllocationBucket(allocID string, opts ...WriteOption) error { - return s.updateWithOptions(opts, func(tx *boltdd.Tx) error { - // Retrieve the root allocations bucket - allocations := tx.Bucket(allocationsBucketName) - if allocations == nil { - return nil - } - - key := []byte(allocID) - return allocations.DeleteBucket(key) - }) -} - -// Close releases all database resources and unlocks the database file on disk. -// All transactions must be closed before closing the database. -func (s *BoltStateDB) Close() error { - return s.db.Close() -} - -// getAllocationBucket returns the bucket used to persist state about a -// particular allocation. If the root allocation bucket or the specific -// allocation bucket doesn't exist, it will be created as long as the -// transaction is writable. -func getAllocationBucket(tx *boltdd.Tx, allocID string) (*boltdd.Bucket, error) { - var err error - w := tx.Writable() - - // Retrieve the root allocations bucket - allocations := tx.Bucket(allocationsBucketName) - if allocations == nil { - if !w { - return nil, fmt.Errorf("Allocations bucket doesn't exist and transaction is not writable") - } - - allocations, err = tx.CreateBucketIfNotExists(allocationsBucketName) - if err != nil { - return nil, err - } - } - - // Retrieve the specific allocations bucket - key := []byte(allocID) - alloc := allocations.Bucket(key) - if alloc == nil { - if !w { - return nil, fmt.Errorf("Allocation bucket doesn't exist and transaction is not writable") - } - - alloc, err = allocations.CreateBucket(key) - if err != nil { - return nil, err - } - } - - return alloc, nil -} - -// getTaskBucket returns the bucket used to persist state about a -// particular task. If the root allocation bucket, the specific -// allocation or task bucket doesn't exist, they will be created as long as the -// transaction is writable. -func getTaskBucket(tx *boltdd.Tx, allocID, taskName string) (*boltdd.Bucket, error) { - alloc, err := getAllocationBucket(tx, allocID) - if err != nil { - return nil, err - } - - // Retrieve the specific task bucket - w := tx.Writable() - key := taskBucketName(taskName) - task := alloc.Bucket(key) - if task == nil { - if !w { - return nil, fmt.Errorf("Task bucket doesn't exist and transaction is not writable") - } - - task, err = alloc.CreateBucket(key) - if err != nil { - return nil, err - } - } - - return task, nil -} - -// PutDevicePluginState stores the device manager's plugin state or returns an -// error. -func (s *BoltStateDB) PutDevicePluginState(ps *dmstate.PluginState) error { - return s.db.Update(func(tx *boltdd.Tx) error { - // Retrieve the root device manager bucket - devBkt, err := tx.CreateBucketIfNotExists(devManagerBucket) - if err != nil { - return err - } - - return devBkt.Put(managerPluginStateKey, ps) - }) -} - -// GetDevicePluginState stores the device manager's plugin state or returns an -// error. 
-func (s *BoltStateDB) GetDevicePluginState() (*dmstate.PluginState, error) { - var ps *dmstate.PluginState - - err := s.db.View(func(tx *boltdd.Tx) error { - devBkt := tx.Bucket(devManagerBucket) - if devBkt == nil { - // No state, return - return nil - } - - // Restore Plugin State if it exists - ps = &dmstate.PluginState{} - if err := devBkt.Get(managerPluginStateKey, ps); err != nil { - if !boltdd.IsErrNotFound(err) { - return fmt.Errorf("failed to read device manager plugin state: %v", err) - } - - // Key not found, reset ps to nil - ps = nil - } - - return nil - }) - - if err != nil { - return nil, err - } - - return ps, nil -} - -// PutDriverPluginState stores the driver manager's plugin state or returns an -// error. -func (s *BoltStateDB) PutDriverPluginState(ps *driverstate.PluginState) error { - return s.db.Update(func(tx *boltdd.Tx) error { - // Retrieve the root driver manager bucket - driverBkt, err := tx.CreateBucketIfNotExists(driverManagerBucket) - if err != nil { - return err - } - - return driverBkt.Put(managerPluginStateKey, ps) - }) -} - -// GetDriverPluginState stores the driver manager's plugin state or returns an -// error. -func (s *BoltStateDB) GetDriverPluginState() (*driverstate.PluginState, error) { - var ps *driverstate.PluginState - - err := s.db.View(func(tx *boltdd.Tx) error { - driverBkt := tx.Bucket(driverManagerBucket) - if driverBkt == nil { - // No state, return - return nil - } - - // Restore Plugin State if it exists - ps = &driverstate.PluginState{} - if err := driverBkt.Get(managerPluginStateKey, ps); err != nil { - if !boltdd.IsErrNotFound(err) { - return fmt.Errorf("failed to read driver manager plugin state: %v", err) - } - - // Key not found, reset ps to nil - ps = nil - } - - return nil - }) - - if err != nil { - return nil, err - } - - return ps, nil -} - -// PutDynamicPluginRegistryState stores the dynamic plugin registry's -// state or returns an error. -func (s *BoltStateDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { - return s.db.Update(func(tx *boltdd.Tx) error { - // Retrieve the root dynamic plugin manager bucket - dynamicBkt, err := tx.CreateBucketIfNotExists(dynamicPluginBucketName) - if err != nil { - return err - } - return dynamicBkt.Put(registryStateKey, ps) - }) -} - -// GetDynamicPluginRegistryState stores the dynamic plugin registry's -// registry state or returns an error. -func (s *BoltStateDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { - var ps *dynamicplugins.RegistryState - - err := s.db.View(func(tx *boltdd.Tx) error { - dynamicBkt := tx.Bucket(dynamicPluginBucketName) - if dynamicBkt == nil { - // No state, return - return nil - } - - // Restore Plugin State if it exists - ps = &dynamicplugins.RegistryState{} - if err := dynamicBkt.Get(registryStateKey, ps); err != nil { - if !boltdd.IsErrNotFound(err) { - return fmt.Errorf("failed to read dynamic plugin registry state: %v", err) - } - - // Key not found, reset ps to nil - ps = nil - } - - return nil - }) - - if err != nil { - return nil, err - } - - return ps, nil -} - -func keyForCheck(allocID string, checkID structs.CheckID) []byte { - return []byte(fmt.Sprintf("%s_%s", allocID, checkID)) -} - -// PutCheckResult puts qr into the state store. 
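keyForCheck above stores check results under flat "<alloc_id>_<check_id>" keys, which are split back apart when results are read (see GetCheckResults below). A tiny round-trip sketch with made-up IDs:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Compose the key the same way keyForCheck does.
	allocID, checkID := "b4e61df9", "c9a5d7f1"
	key := []byte(fmt.Sprintf("%s_%s", allocID, checkID))

	// Split it back into its two parts, as the read path does.
	parts := bytes.SplitN(key, []byte("_"), 2)
	fmt.Println(string(parts[0]), string(parts[1])) // b4e61df9 c9a5d7f1
}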
-func (s *BoltStateDB) PutCheckResult(allocID string, qr *structs.CheckQueryResult) error { - return s.db.Update(func(tx *boltdd.Tx) error { - bkt, err := tx.CreateBucketIfNotExists(checkResultsBucket) - if err != nil { - return err - } - key := keyForCheck(allocID, qr.ID) - return bkt.Put(key, qr) - }) -} - -// GetCheckResults gets the check results associated with allocID from the state store. -func (s *BoltStateDB) GetCheckResults() (checks.ClientResults, error) { - m := make(checks.ClientResults) - - err := s.db.View(func(tx *boltdd.Tx) error { - bkt := tx.Bucket(checkResultsBucket) - if bkt == nil { - return nil // nothing set yet - } - - if err := boltdd.Iterate(bkt, nil, func(key []byte, qr structs.CheckQueryResult) { - parts := bytes.SplitN(key, []byte("_"), 2) - allocID, _ := parts[0], parts[1] - m.Insert(string(allocID), &qr) - }); err != nil { - return err - } - return nil - }) - return m, err -} - -func (s *BoltStateDB) DeleteCheckResults(allocID string, checkIDs []structs.CheckID) error { - return s.db.Update(func(tx *boltdd.Tx) error { - bkt := tx.Bucket(checkResultsBucket) - if bkt == nil { - return nil // nothing set yet - } - - for _, id := range checkIDs { - key := keyForCheck(allocID, id) - if err := bkt.Delete(key); err != nil { - return err - } - } - return nil - }) -} - -func (s *BoltStateDB) PurgeCheckResults(allocID string) error { - return s.db.Update(func(tx *boltdd.Tx) error { - bkt := tx.Bucket(checkResultsBucket) - if bkt == nil { - return nil // nothing set yet - } - return bkt.DeletePrefix([]byte(allocID + "_")) - }) -} - -// init initializes metadata entries in a newly created state database. -func (s *BoltStateDB) init() error { - return s.db.Update(func(tx *boltdd.Tx) error { - return addMeta(tx.BoltTx()) - }) -} - -// updateWithOptions enables adjustments to db.Update operation, including Batch mode. -func (s *BoltStateDB) updateWithOptions(opts []WriteOption, updateFn func(tx *boltdd.Tx) error) error { - writeOpts := mergeWriteOptions(opts) - - if writeOpts.BatchMode { - // In Batch mode, BoltDB opportunistically combines multiple concurrent writes into one or - // several transactions. See boltdb.Batch() documentation for details. - return s.db.Batch(updateFn) - } else { - return s.db.Update(updateFn) - } -} - -// Upgrade bolt state db from 0.8 schema to 0.9 schema. Noop if already using -// 0.9 schema. Creates a backup before upgrading. -func (s *BoltStateDB) Upgrade() error { - // Check to see if the underlying DB needs upgrading. - upgrade09, upgrade13, err := NeedsUpgrade(s.db.BoltDB()) - if err != nil { - return err - } - if !upgrade09 && !upgrade13 { - // No upgrade needed! - return nil - } - - // Upgraded needed. Backup the boltdb first. 
- backupFileName := filepath.Join(s.stateDir, "state.db.backup") - if err := backupDB(s.db.BoltDB(), backupFileName); err != nil { - return fmt.Errorf("error backing up state db: %v", err) - } - - // Perform the upgrade - if err := s.db.Update(func(tx *boltdd.Tx) error { - - if upgrade09 { - if err := UpgradeAllocs(s.logger, tx); err != nil { - return err - } - } - if upgrade13 { - if err := UpgradeDynamicPluginRegistry(s.logger, tx); err != nil { - return err - } - } - - // Add standard metadata - if err := addMeta(tx.BoltTx()); err != nil { - return err - } - - // Write the time the upgrade was done - bkt, err := tx.CreateBucketIfNotExists(metaBucketName) - if err != nil { - return err - } - - return bkt.Put(metaUpgradedKey, time.Now().Format(time.RFC3339)) - }); err != nil { - return err - } - - s.logger.Info("successfully upgraded state") - return nil -} - -// DB allows access to the underlying BoltDB for testing purposes. -func (s *BoltStateDB) DB() *boltdd.DB { - return s.db -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/db_error.go b/vendor/github.com/hashicorp/nomad/client/state/db_error.go deleted file mode 100644 index 77e51e63..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/db_error.go +++ /dev/null @@ -1,114 +0,0 @@ -package state - -import ( - "fmt" - - "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" - dmstate "github.com/hashicorp/nomad/client/devicemanager/state" - "github.com/hashicorp/nomad/client/dynamicplugins" - driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" - "github.com/hashicorp/nomad/client/serviceregistration/checks" - "github.com/hashicorp/nomad/nomad/structs" -) - -// ErrDB implements a StateDB that returns errors on restore methods, used for testing -type ErrDB struct { - // Allocs is a preset slice of allocations used in GetAllAllocations - Allocs []*structs.Allocation -} - -func (m *ErrDB) Name() string { - return "errdb" -} - -func (m *ErrDB) Upgrade() error { - return nil -} - -func (m *ErrDB) GetAllAllocations() ([]*structs.Allocation, map[string]error, error) { - return m.Allocs, nil, nil -} - -func (m *ErrDB) PutAllocation(alloc *structs.Allocation, opts ...WriteOption) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) GetDeploymentStatus(allocID string) (*structs.AllocDeploymentStatus, error) { - return nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) PutDeploymentStatus(allocID string, ds *structs.AllocDeploymentStatus) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) GetNetworkStatus(allocID string) (*structs.AllocNetworkStatus, error) { - return nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) PutNetworkStatus(allocID string, ns *structs.AllocNetworkStatus, opts ...WriteOption) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) GetTaskRunnerState(allocID string, taskName string) (*state.LocalState, *structs.TaskState, error) { - return nil, nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) PutTaskRunnerLocalState(allocID string, taskName string, val *state.LocalState) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) PutTaskState(allocID string, taskName string, state *structs.TaskState) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) DeleteTaskBucket(allocID, taskName string) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) DeleteAllocationBucket(allocID string, opts ...WriteOption) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) PutDevicePluginState(ps *dmstate.PluginState) error { - return 
fmt.Errorf("Error!") -} - -func (m *ErrDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { - return nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) PutDynamicPluginRegistryState(state *dynamicplugins.RegistryState) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) GetDevicePluginState() (*dmstate.PluginState, error) { - return nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) GetDriverPluginState() (*driverstate.PluginState, error) { - return nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) PutDriverPluginState(ps *driverstate.PluginState) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) PutCheckResult(allocID string, qr *structs.CheckQueryResult) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) GetCheckResults() (checks.ClientResults, error) { - return nil, fmt.Errorf("Error!") -} - -func (m *ErrDB) DeleteCheckResults(allocID string, checkIDs []structs.CheckID) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) PurgeCheckResults(allocID string) error { - return fmt.Errorf("Error!") -} - -func (m *ErrDB) Close() error { - return fmt.Errorf("Error!") -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/db_mem.go b/vendor/github.com/hashicorp/nomad/client/state/db_mem.go deleted file mode 100644 index 284fc3f4..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/db_mem.go +++ /dev/null @@ -1,280 +0,0 @@ -package state - -import ( - "sync" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" - dmstate "github.com/hashicorp/nomad/client/devicemanager/state" - "github.com/hashicorp/nomad/client/dynamicplugins" - driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" - "github.com/hashicorp/nomad/client/serviceregistration/checks" - "github.com/hashicorp/nomad/nomad/structs" - "golang.org/x/exp/maps" -) - -// MemDB implements a StateDB that stores data in memory and should only be -// used for testing. All methods are safe for concurrent use. 
-type MemDB struct { - // alloc_id -> value - allocs map[string]*structs.Allocation - - // alloc_id -> value - deployStatus map[string]*structs.AllocDeploymentStatus - - // alloc_id -> value - networkStatus map[string]*structs.AllocNetworkStatus - - // alloc_id -> task_name -> value - localTaskState map[string]map[string]*state.LocalState - taskState map[string]map[string]*structs.TaskState - - // alloc_id -> check_id -> result - checks checks.ClientResults - - // devicemanager -> plugin-state - devManagerPs *dmstate.PluginState - - // drivermanager -> plugin-state - driverManagerPs *driverstate.PluginState - - // dynamicmanager -> registry-state - dynamicManagerPs *dynamicplugins.RegistryState - - logger hclog.Logger - - mu sync.RWMutex -} - -func NewMemDB(logger hclog.Logger) *MemDB { - logger = logger.Named("memdb") - return &MemDB{ - allocs: make(map[string]*structs.Allocation), - deployStatus: make(map[string]*structs.AllocDeploymentStatus), - networkStatus: make(map[string]*structs.AllocNetworkStatus), - localTaskState: make(map[string]map[string]*state.LocalState), - taskState: make(map[string]map[string]*structs.TaskState), - checks: make(checks.ClientResults), - logger: logger, - } -} - -func (m *MemDB) Name() string { - return "memdb" -} - -func (m *MemDB) Upgrade() error { - return nil -} - -func (m *MemDB) GetAllAllocations() ([]*structs.Allocation, map[string]error, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - allocs := make([]*structs.Allocation, 0, len(m.allocs)) - for _, v := range m.allocs { - allocs = append(allocs, v) - } - - return allocs, map[string]error{}, nil -} - -func (m *MemDB) PutAllocation(alloc *structs.Allocation, _ ...WriteOption) error { - m.mu.Lock() - defer m.mu.Unlock() - m.allocs[alloc.ID] = alloc - return nil -} - -func (m *MemDB) GetDeploymentStatus(allocID string) (*structs.AllocDeploymentStatus, error) { - m.mu.Lock() - defer m.mu.Unlock() - return m.deployStatus[allocID], nil -} - -func (m *MemDB) PutDeploymentStatus(allocID string, ds *structs.AllocDeploymentStatus) error { - m.mu.Lock() - m.deployStatus[allocID] = ds - defer m.mu.Unlock() - return nil -} - -func (m *MemDB) GetNetworkStatus(allocID string) (*structs.AllocNetworkStatus, error) { - m.mu.Lock() - defer m.mu.Unlock() - return m.networkStatus[allocID], nil -} - -func (m *MemDB) PutNetworkStatus(allocID string, ns *structs.AllocNetworkStatus, _ ...WriteOption) error { - m.mu.Lock() - m.networkStatus[allocID] = ns - defer m.mu.Unlock() - return nil -} - -func (m *MemDB) GetTaskRunnerState(allocID string, taskName string) (*state.LocalState, *structs.TaskState, error) { - m.mu.RLock() - defer m.mu.RUnlock() - - var ls *state.LocalState - var ts *structs.TaskState - - // Local Task State - allocLocalTS := m.localTaskState[allocID] - if len(allocLocalTS) != 0 { - ls = allocLocalTS[taskName] - } - - // Task State - allocTS := m.taskState[allocID] - if len(allocTS) != 0 { - ts = allocTS[taskName] - } - - return ls, ts, nil -} - -func (m *MemDB) PutTaskRunnerLocalState(allocID string, taskName string, val *state.LocalState) error { - m.mu.Lock() - defer m.mu.Unlock() - - if alts, ok := m.localTaskState[allocID]; ok { - alts[taskName] = val.Copy() - return nil - } - - m.localTaskState[allocID] = map[string]*state.LocalState{ - taskName: val.Copy(), - } - - return nil -} - -func (m *MemDB) PutTaskState(allocID string, taskName string, state *structs.TaskState) error { - m.mu.Lock() - defer m.mu.Unlock() - - if ats, ok := m.taskState[allocID]; ok { - ats[taskName] = state.Copy() - return 
nil - } - - m.taskState[allocID] = map[string]*structs.TaskState{ - taskName: state.Copy(), - } - - return nil -} - -func (m *MemDB) DeleteTaskBucket(allocID, taskName string) error { - m.mu.Lock() - defer m.mu.Unlock() - - if ats, ok := m.taskState[allocID]; ok { - delete(ats, taskName) - } - - if alts, ok := m.localTaskState[allocID]; ok { - delete(alts, taskName) - } - - return nil -} - -func (m *MemDB) DeleteAllocationBucket(allocID string, _ ...WriteOption) error { - m.mu.Lock() - defer m.mu.Unlock() - - delete(m.allocs, allocID) - delete(m.taskState, allocID) - delete(m.localTaskState, allocID) - - return nil -} - -func (m *MemDB) PutDevicePluginState(ps *dmstate.PluginState) error { - m.mu.Lock() - defer m.mu.Unlock() - m.devManagerPs = ps - return nil -} - -// GetDevicePluginState stores the device manager's plugin state or returns an -// error. -func (m *MemDB) GetDevicePluginState() (*dmstate.PluginState, error) { - m.mu.Lock() - defer m.mu.Unlock() - return m.devManagerPs, nil -} - -func (m *MemDB) GetDriverPluginState() (*driverstate.PluginState, error) { - m.mu.Lock() - defer m.mu.Unlock() - return m.driverManagerPs, nil -} - -func (m *MemDB) PutDriverPluginState(ps *driverstate.PluginState) error { - m.mu.Lock() - defer m.mu.Unlock() - m.driverManagerPs = ps - return nil -} - -func (m *MemDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { - m.mu.Lock() - defer m.mu.Unlock() - return m.dynamicManagerPs, nil -} - -func (m *MemDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { - m.mu.Lock() - defer m.mu.Unlock() - m.dynamicManagerPs = ps - return nil -} - -func (m *MemDB) PutCheckResult(allocID string, qr *structs.CheckQueryResult) error { - m.mu.Lock() - defer m.mu.Unlock() - - if _, exists := m.checks[allocID]; !exists { - m.checks[allocID] = make(checks.AllocationResults) - } - - m.checks[allocID][qr.ID] = qr - return nil -} - -func (m *MemDB) GetCheckResults() (checks.ClientResults, error) { - m.mu.Lock() - defer m.mu.Unlock() - return maps.Clone(m.checks), nil -} - -func (m *MemDB) DeleteCheckResults(allocID string, checkIDs []structs.CheckID) error { - m.mu.Lock() - defer m.mu.Unlock() - for _, id := range checkIDs { - delete(m.checks[allocID], id) - } - return nil -} - -func (m *MemDB) PurgeCheckResults(allocID string) error { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.checks, allocID) - return nil -} - -func (m *MemDB) Close() error { - m.mu.Lock() - defer m.mu.Unlock() - - // Set everything to nil to blow up on further use - m.allocs = nil - m.taskState = nil - m.localTaskState = nil - - return nil -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/db_noop.go b/vendor/github.com/hashicorp/nomad/client/state/db_noop.go deleted file mode 100644 index 0b433e14..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/db_noop.go +++ /dev/null @@ -1,109 +0,0 @@ -package state - -import ( - "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" - dmstate "github.com/hashicorp/nomad/client/devicemanager/state" - "github.com/hashicorp/nomad/client/dynamicplugins" - driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" - "github.com/hashicorp/nomad/client/serviceregistration/checks" - "github.com/hashicorp/nomad/nomad/structs" -) - -// NoopDB implements a StateDB that does not persist any data. 
-type NoopDB struct{} - -func (n NoopDB) Name() string { - return "noopdb" -} - -func (n NoopDB) Upgrade() error { - return nil -} - -func (n NoopDB) GetAllAllocations() ([]*structs.Allocation, map[string]error, error) { - return nil, nil, nil -} - -func (n NoopDB) PutAllocation(alloc *structs.Allocation, opts ...WriteOption) error { - return nil -} - -func (n NoopDB) GetDeploymentStatus(allocID string) (*structs.AllocDeploymentStatus, error) { - return nil, nil -} - -func (n NoopDB) PutDeploymentStatus(allocID string, ds *structs.AllocDeploymentStatus) error { - return nil -} - -func (n NoopDB) GetNetworkStatus(allocID string) (*structs.AllocNetworkStatus, error) { - return nil, nil -} - -func (n NoopDB) PutNetworkStatus(allocID string, ds *structs.AllocNetworkStatus, opts ...WriteOption) error { - return nil -} - -func (n NoopDB) GetTaskRunnerState(allocID string, taskName string) (*state.LocalState, *structs.TaskState, error) { - return nil, nil, nil -} - -func (n NoopDB) PutTaskRunnerLocalState(allocID string, taskName string, val *state.LocalState) error { - return nil -} - -func (n NoopDB) PutTaskState(allocID string, taskName string, state *structs.TaskState) error { - return nil -} - -func (n NoopDB) DeleteTaskBucket(allocID, taskName string) error { - return nil -} - -func (n NoopDB) DeleteAllocationBucket(allocID string, opts ...WriteOption) error { - return nil -} - -func (n NoopDB) PutDevicePluginState(ps *dmstate.PluginState) error { - return nil -} - -func (n NoopDB) GetDevicePluginState() (*dmstate.PluginState, error) { - return nil, nil -} - -func (n NoopDB) PutDriverPluginState(ps *driverstate.PluginState) error { - return nil -} - -func (n NoopDB) GetDriverPluginState() (*driverstate.PluginState, error) { - return nil, nil -} - -func (n NoopDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { - return nil -} - -func (n NoopDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { - return nil, nil -} - -func (n NoopDB) PutCheckResult(allocID string, qr *structs.CheckQueryResult) error { - return nil -} - -func (n NoopDB) GetCheckResults() (checks.ClientResults, error) { - return nil, nil -} - -func (n NoopDB) DeleteCheckResults(allocID string, checkIDs []structs.CheckID) error { - return nil -} - -func (n NoopDB) PurgeCheckResults(allocID string) error { - return nil -} - -func (n NoopDB) Close() error { - return nil -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/interface.go b/vendor/github.com/hashicorp/nomad/client/state/interface.go deleted file mode 100644 index f1250350..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/interface.go +++ /dev/null @@ -1,136 +0,0 @@ -package state - -import ( - "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" - dmstate "github.com/hashicorp/nomad/client/devicemanager/state" - "github.com/hashicorp/nomad/client/dynamicplugins" - driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" - "github.com/hashicorp/nomad/client/serviceregistration/checks" - "github.com/hashicorp/nomad/nomad/structs" -) - -// StateDB implementations store and load Nomad client state. -type StateDB interface { - // Name of implementation. - Name() string - - // Upgrade ensures the layout of the database is at the latest version - // or returns an error. Corrupt data will be dropped when possible. - // Errors should be considered critical and unrecoverable. 
- Upgrade() error - - // GetAllAllocations returns all valid allocations and a map of - // allocation IDs to retrieval errors. - // - // If a single error is returned then both allocations and the map will be nil. - GetAllAllocations() ([]*structs.Allocation, map[string]error, error) - - // PutAllocation stores an allocation or returns an error if it could - // not be stored. - PutAllocation(*structs.Allocation, ...WriteOption) error - - // GetDeploymentStatus gets the allocation's deployment status. It may be nil. - GetDeploymentStatus(allocID string) (*structs.AllocDeploymentStatus, error) - - // PutDeploymentStatus sets the allocation's deployment status. It may be nil. - PutDeploymentStatus(allocID string, ds *structs.AllocDeploymentStatus) error - - // GetNetworkStatus gets the allocation's network status. It may be nil. - GetNetworkStatus(allocID string) (*structs.AllocNetworkStatus, error) - - // PutNetworkStatus puts the allocation's network status. It may be nil. - PutNetworkStatus(allocID string, ns *structs.AllocNetworkStatus, opts ...WriteOption) error - - // GetTaskRunnerState returns the LocalState and TaskState for a - // TaskRunner. Either state may be nil if it is not found, but if an - // error is encountered only the error will be non-nil. - GetTaskRunnerState(allocID, taskName string) (*state.LocalState, *structs.TaskState, error) - - // PutTaskRunnerLocalState stores the LocalState for a TaskRunner or - // returns an error. - PutTaskRunnerLocalState(allocID, taskName string, val *state.LocalState) error - - // PutTaskState stores the TaskState for a TaskRunner or returns an - // error. - PutTaskState(allocID, taskName string, state *structs.TaskState) error - - // DeleteTaskBucket deletes a task's state bucket if it exists. No - // error is returned if it does not exist. - DeleteTaskBucket(allocID, taskName string) error - - // DeleteAllocationBucket deletes an allocation's state bucket if it - // exists. No error is returned if it does not exist. - DeleteAllocationBucket(allocID string, opts ...WriteOption) error - - // GetDevicePluginState is used to retrieve the device manager's plugin - // state. - GetDevicePluginState() (*dmstate.PluginState, error) - - // PutDevicePluginState is used to store the device manager's plugin - // state. - PutDevicePluginState(state *dmstate.PluginState) error - - // GetDriverPluginState is used to retrieve the driver manager's plugin - // state. - GetDriverPluginState() (*driverstate.PluginState, error) - - // PutDriverPluginState is used to store the driver manager's plugin - // state. - PutDriverPluginState(state *driverstate.PluginState) error - - // GetDynamicPluginRegistryState is used to retrieve a dynamic plugin manager's state. - GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) - - // PutDynamicPluginRegistryState is used to store the dynamic plugin manager's state. - PutDynamicPluginRegistryState(state *dynamicplugins.RegistryState) error - - // PutCheckResult sets the query result for the check implied in qr. - PutCheckResult(allocID string, qr *structs.CheckQueryResult) error - - // DeleteCheckResults removes the given set of check results. - DeleteCheckResults(allocID string, checkIDs []structs.CheckID) error - - // PurgeCheckResults removes all check results of the given allocation. - PurgeCheckResults(allocID string) error - - // GetCheckResults is used to restore the set of check results on this Client. - GetCheckResults() (checks.ClientResults, error) - - // Close the database. 
Unsafe for further use after calling regardless - // of return value. - Close() error -} - -// WriteOptions adjusts the way the data is persisted by the StateDB above. Default is -// zero/false values for all fields. To provide different values, use With* functions -// below, like this: statedb.PutAllocation(alloc, WithBatchMode()) -type WriteOptions struct { - // In Batch mode, concurrent writes (Put* and Delete* operations above) are - // coalesced into a single transaction, increasing write performance. To benefit - // from this mode, writes must happen concurrently in goroutines, as every write - // request still waits for the shared transaction to commit before returning. - // See https://github.com/boltdb/bolt#batch-read-write-transactions for details. - // This mode is only supported for BoltDB state backend and is ignored in other backends. - BatchMode bool -} - -// WriteOption is a function that modifies WriteOptions struct above. -type WriteOption func(*WriteOptions) - -// mergeWriteOptions creates a final WriteOptions struct to be used by the write methods above -// from a list of WriteOption-s provided as variadic arguments. -func mergeWriteOptions(opts []WriteOption) WriteOptions { - writeOptions := WriteOptions{} // Default WriteOptions is zero value. - for _, opt := range opts { - opt(&writeOptions) - } - return writeOptions -} - -// WithBatchMode enables Batch mode for write requests (Put* and Delete* -// operations above). -func WithBatchMode() WriteOption { - return func(s *WriteOptions) { - s.BatchMode = true - } -} diff --git a/vendor/github.com/hashicorp/nomad/client/state/upgrade.go b/vendor/github.com/hashicorp/nomad/client/state/upgrade.go deleted file mode 100644 index b2a1394e..00000000 --- a/vendor/github.com/hashicorp/nomad/client/state/upgrade.go +++ /dev/null @@ -1,349 +0,0 @@ -package state - -import ( - "bytes" - "container/list" - "fmt" - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-msgpack/codec" - "github.com/hashicorp/nomad/client/dynamicplugins" - "github.com/hashicorp/nomad/helper/boltdd" - "github.com/hashicorp/nomad/nomad/structs" - "go.etcd.io/bbolt" -) - -// NeedsUpgrade returns true if the BoltDB needs upgrading or false if it is -// already up to date. -func NeedsUpgrade(bdb *bbolt.DB) (upgradeTo09, upgradeTo13 bool, err error) { - upgradeTo09 = true - upgradeTo13 = true - err = bdb.View(func(tx *bbolt.Tx) error { - b := tx.Bucket(metaBucketName) - if b == nil { - // No meta bucket; upgrade - return nil - } - - v := b.Get(metaVersionKey) - if len(v) == 0 { - // No version; upgrade - return nil - } - - if bytes.Equal(v, []byte{'2'}) { - upgradeTo09 = false - return nil - } - if bytes.Equal(v, metaVersion) { - upgradeTo09 = false - upgradeTo13 = false - return nil - } - - // Version exists but does not match. Abort. - return fmt.Errorf("incompatible state version. expected %q but found %q", - metaVersion, v) - - }) - - return -} - -// addMeta adds version metadata to BoltDB to mark it as upgraded and -// should be run at the end of the upgrade transaction. -func addMeta(tx *bbolt.Tx) error { - // Create the meta bucket if it doesn't exist - bkt, err := tx.CreateBucketIfNotExists(metaBucketName) - if err != nil { - return err - } - return bkt.Put(metaVersionKey, metaVersion) -} - -// backupDB backs up the existing state database prior to upgrade overwriting -// previous backups. 
-func backupDB(bdb *bbolt.DB, dst string) error { - fd, err := os.Create(dst) - if err != nil { - return err - } - - return bdb.View(func(tx *bbolt.Tx) error { - if _, err := tx.WriteTo(fd); err != nil { - fd.Close() - return err - } - - return fd.Close() - }) -} - -// UpgradeAllocs upgrades the boltdb schema. Example 0.8 schema: -// -// allocations -// 15d83e8a-74a2-b4da-3f17-ed5c12895ea8 -// echo -// simple-all (342 bytes) -// alloc (2827 bytes) -// alloc-dir (166 bytes) -// immutable (15 bytes) -// mutable (1294 bytes) -func UpgradeAllocs(logger hclog.Logger, tx *boltdd.Tx) error { - btx := tx.BoltTx() - allocationsBucket := btx.Bucket(allocationsBucketName) - if allocationsBucket == nil { - // No state! - return nil - } - - // Gather alloc buckets and remove unexpected key/value pairs - allocBuckets := [][]byte{} - cur := allocationsBucket.Cursor() - for k, v := cur.First(); k != nil; k, v = cur.Next() { - if v != nil { - logger.Warn("deleting unexpected key in state db", - "key", string(k), "value_bytes", len(v), - ) - - if err := cur.Delete(); err != nil { - return fmt.Errorf("error deleting unexpected key %q: %v", string(k), err) - } - continue - } - - allocBuckets = append(allocBuckets, k) - } - - for _, allocBucket := range allocBuckets { - allocID := string(allocBucket) - - bkt := allocationsBucket.Bucket(allocBucket) - if bkt == nil { - // This should never happen as we just read the bucket. - return fmt.Errorf("unexpected bucket missing %q", allocID) - } - - allocLogger := logger.With("alloc_id", allocID) - if err := upgradeAllocBucket(allocLogger, tx, bkt, allocID); err != nil { - // Log and drop invalid allocs - allocLogger.Error("dropping invalid allocation due to error while upgrading state", - "error", err, - ) - - // If we can't delete the bucket something is seriously - // wrong, fail hard. - if err := allocationsBucket.DeleteBucket(allocBucket); err != nil { - return fmt.Errorf("error deleting invalid allocation state: %v", err) - } - } - } - - return nil -} - -// upgradeAllocBucket upgrades an alloc bucket. -func upgradeAllocBucket(logger hclog.Logger, tx *boltdd.Tx, bkt *bbolt.Bucket, allocID string) error { - allocFound := false - taskBuckets := [][]byte{} - cur := bkt.Cursor() - for k, v := cur.First(); k != nil; k, v = cur.Next() { - switch string(k) { - case "alloc": - // Alloc has not changed; leave it be - allocFound = true - case "alloc-dir": - // Drop alloc-dir entries as they're no longer needed. - cur.Delete() - case "immutable": - // Drop immutable state. Nothing from it needs to be - // upgraded. - cur.Delete() - case "mutable": - // Decode and upgrade - if err := upgradeOldAllocMutable(tx, allocID, v); err != nil { - return err - } - cur.Delete() - default: - if v != nil { - logger.Warn("deleting unexpected state entry for allocation", - "key", string(k), "value_bytes", len(v), - ) - - if err := cur.Delete(); err != nil { - return err - } - - continue - } - - // Nested buckets are tasks - taskBuckets = append(taskBuckets, k) - } - } - - // If the alloc entry was not found, abandon this allocation as the - // state has been corrupted. - if !allocFound { - return fmt.Errorf("alloc entry not found") - } - - // Upgrade tasks - for _, taskBucket := range taskBuckets { - taskName := string(taskBucket) - taskLogger := logger.With("task_name", taskName) - - taskBkt := bkt.Bucket(taskBucket) - if taskBkt == nil { - // This should never happen as we just read the bucket. 
- return fmt.Errorf("unexpected bucket missing %q", taskName) - } - - oldState, err := upgradeTaskBucket(taskLogger, taskBkt) - if err != nil { - taskLogger.Warn("dropping invalid task due to error while upgrading state", - "error", err, - ) - - // Delete the invalid task bucket and treat failures - // here as unrecoverable errors. - if err := bkt.DeleteBucket(taskBucket); err != nil { - return fmt.Errorf("error deleting invalid task state for task %q: %v", - taskName, err, - ) - } - continue - } - - // Convert 0.8 task state to 0.9 task state - localTaskState, err := oldState.Upgrade(allocID, taskName) - if err != nil { - taskLogger.Warn("dropping invalid task due to error while upgrading state", - "error", err, - ) - - // Delete the invalid task bucket and treat failures - // here as unrecoverable errors. - if err := bkt.DeleteBucket(taskBucket); err != nil { - return fmt.Errorf("error deleting invalid task state for task %q: %v", - taskName, err, - ) - } - continue - } - - // Insert the new task state - if err := putTaskRunnerLocalStateImpl(tx, allocID, taskName, localTaskState); err != nil { - return err - } - - // Delete the old task bucket - if err := bkt.DeleteBucket(taskBucket); err != nil { - return err - } - - taskLogger.Trace("upgraded", "from", oldState.Version) - } - - return nil -} - -// upgradeTaskBucket iterates over keys in a task bucket, deleting invalid keys -// and returning the 0.8 version of the state. -func upgradeTaskBucket(logger hclog.Logger, bkt *bbolt.Bucket) (*taskRunnerState08, error) { - simpleFound := false - var trState taskRunnerState08 - - cur := bkt.Cursor() - for k, v := cur.First(); k != nil; k, v = cur.Next() { - if v == nil { - // value is nil: delete unexpected bucket - logger.Warn("deleting unexpected task state bucket", - "bucket", string(k), - ) - - if err := bkt.DeleteBucket(k); err != nil { - return nil, fmt.Errorf("error deleting unexpected task bucket %q: %v", string(k), err) - } - continue - } - - if !bytes.Equal(k, []byte("simple-all")) { - // value is non-nil: delete unexpected entry - logger.Warn("deleting unexpected task state entry", - "key", string(k), "value_bytes", len(v), - ) - - if err := cur.Delete(); err != nil { - return nil, fmt.Errorf("error delting unexpected task key %q: %v", string(k), err) - } - continue - } - - // Decode simple-all - simpleFound = true - if err := codec.NewDecoderBytes(v, structs.MsgpackHandle).Decode(&trState); err != nil { - return nil, fmt.Errorf("failed to decode task state from 'simple-all' entry: %v", err) - } - } - - if !simpleFound { - return nil, fmt.Errorf("task state entry not found") - } - - return &trState, nil -} - -// upgradeOldAllocMutable upgrades Nomad 0.8 alloc runner state. 
-func upgradeOldAllocMutable(tx *boltdd.Tx, allocID string, oldBytes []byte) error { - var oldMutable allocRunnerMutableState08 - err := codec.NewDecoderBytes(oldBytes, structs.MsgpackHandle).Decode(&oldMutable) - if err != nil { - return err - } - - // Upgrade Deployment Status - if err := putDeploymentStatusImpl(tx, allocID, oldMutable.DeploymentStatus); err != nil { - return err - } - - // Upgrade Task States - for taskName, taskState := range oldMutable.TaskStates { - if err := putTaskStateImpl(tx, allocID, taskName, taskState); err != nil { - return err - } - } - - return nil -} - -func UpgradeDynamicPluginRegistry(logger hclog.Logger, tx *boltdd.Tx) error { - - dynamicBkt := tx.Bucket(dynamicPluginBucketName) - if dynamicBkt == nil { - return nil // no previous plugins upgrade - } - - oldState := &RegistryState12{} - if err := dynamicBkt.Get(registryStateKey, oldState); err != nil { - if !boltdd.IsErrNotFound(err) { - return fmt.Errorf("failed to read dynamic plugin registry state: %v", err) - } - } - - newState := &dynamicplugins.RegistryState{ - Plugins: make(map[string]map[string]*list.List), - } - - for ptype, plugins := range oldState.Plugins { - newState.Plugins[ptype] = make(map[string]*list.List) - for pname, pluginInfo := range plugins { - newState.Plugins[ptype][pname] = list.New() - entry := list.Element{Value: pluginInfo} - newState.Plugins[ptype][pname].PushFront(entry) - } - } - return dynamicBkt.Put(registryStateKey, newState) -} diff --git a/vendor/github.com/hashicorp/nomad/client/structs/allochook.go b/vendor/github.com/hashicorp/nomad/client/structs/allochook.go index 59c56c0f..3a694d85 100644 --- a/vendor/github.com/hashicorp/nomad/client/structs/allochook.go +++ b/vendor/github.com/hashicorp/nomad/client/structs/allochook.go @@ -4,26 +4,39 @@ import ( "sync" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" + "github.com/hashicorp/nomad/helper" ) // AllocHookResources contains data that is provided by AllocRunner Hooks for -// consumption by TaskRunners +// consumption by TaskRunners. This should be instantiated once in the +// AllocRunner and then only accessed via getters and setters that hold the +// lock. 
type AllocHookResources struct { - CSIMounts map[string]*csimanager.MountInfo + csiMounts map[string]*csimanager.MountInfo mu sync.RWMutex } +func NewAllocHookResources() *AllocHookResources { + return &AllocHookResources{ + csiMounts: map[string]*csimanager.MountInfo{}, + } +} + +// GetCSIMounts returns a copy of the CSI mount info previously written by the +// CSI allocrunner hook func (a *AllocHookResources) GetCSIMounts() map[string]*csimanager.MountInfo { a.mu.RLock() defer a.mu.RUnlock() - return a.CSIMounts + return helper.DeepCopyMap(a.csiMounts) } +// SetCSIMounts stores the CSI mount info for later use by the volume taskrunner +// hook func (a *AllocHookResources) SetCSIMounts(m map[string]*csimanager.MountInfo) { a.mu.Lock() defer a.mu.Unlock() - a.CSIMounts = m + a.csiMounts = m } diff --git a/vendor/github.com/hashicorp/nomad/client/taskenv/env.go b/vendor/github.com/hashicorp/nomad/client/taskenv/env.go index 1ed6f518..6bbc28b8 100644 --- a/vendor/github.com/hashicorp/nomad/client/taskenv/env.go +++ b/vendor/github.com/hashicorp/nomad/client/taskenv/env.go @@ -1021,7 +1021,7 @@ func (b *Builder) SetVaultToken(token, namespace string, inject bool) *Builder { // addPort keys and values for other tasks to an env var map func addPort(m map[string]string, taskName, ip, portLabel string, port int) { key := fmt.Sprintf("%s%s_%s", AddrPrefix, taskName, portLabel) - m[key] = fmt.Sprintf("%s:%d", ip, port) + m[key] = net.JoinHostPort(ip, strconv.Itoa(port)) key = fmt.Sprintf("%s%s_%s", IpPrefix, taskName, portLabel) m[key] = ip key = fmt.Sprintf("%s%s_%s", PortPrefix, taskName, portLabel) @@ -1042,8 +1042,9 @@ func addGroupPort(m map[string]string, port structs.Port) { func addPorts(m map[string]string, ports structs.AllocatedPorts) { for _, p := range ports { - m[AddrPrefix+p.Label] = fmt.Sprintf("%s:%d", p.HostIP, p.Value) - m[HostAddrPrefix+p.Label] = fmt.Sprintf("%s:%d", p.HostIP, p.Value) + port := strconv.Itoa(p.Value) + m[AddrPrefix+p.Label] = net.JoinHostPort(p.HostIP, port) + m[HostAddrPrefix+p.Label] = net.JoinHostPort(p.HostIP, port) m[IpPrefix+p.Label] = p.HostIP m[HostIpPrefix+p.Label] = p.HostIP if p.To > 0 { @@ -1056,6 +1057,6 @@ func addPorts(m map[string]string, ports structs.AllocatedPorts) { m[AllocPortPrefix+p.Label] = val } - m[HostPortPrefix+p.Label] = strconv.Itoa(p.Value) + m[HostPortPrefix+p.Label] = port } } diff --git a/vendor/github.com/hashicorp/nomad/drivers/shared/executor/executor_linux.go b/vendor/github.com/hashicorp/nomad/drivers/shared/executor/executor_linux.go index 4ab8367b..3cce11e7 100644 --- a/vendor/github.com/hashicorp/nomad/drivers/shared/executor/executor_linux.go +++ b/vendor/github.com/hashicorp/nomad/drivers/shared/executor/executor_linux.go @@ -526,8 +526,17 @@ func configureCapabilities(cfg *lconfigs.Config, command *ExecCommand) { } default: // otherwise apply the plugin + task capability configuration + // + // The capabilities must be set in the Ambient set as libcontainer + // performs `execve`` as an unprivileged user. Ambient also requires + // that capabilities are Permitted and Inheritable. Setting Effective + // is unnecessary, because we only need the capabilities to become + // effective _after_ execve, not before. 
cfg.Capabilities = &lconfigs.Capabilities{ - Bounding: command.Capabilities, + Bounding: command.Capabilities, + Permitted: command.Capabilities, + Inheritable: command.Capabilities, + Ambient: command.Capabilities, } } } @@ -677,9 +686,8 @@ func configureCgroups(cfg *lconfigs.Config, command *ExecCommand) error { cfg.Cgroups.Resources.Memory = memHard * 1024 * 1024 cfg.Cgroups.Resources.MemoryReservation = memSoft * 1024 * 1024 - // Disable swap to avoid issues on the machine - var memSwappiness uint64 - cfg.Cgroups.Resources.MemorySwappiness = &memSwappiness + // Disable swap if possible, to avoid issues on the machine + cfg.Cgroups.Resources.MemorySwappiness = cgutil.MaybeDisableMemorySwappiness() } cpuShares := res.Cpu.CpuShares diff --git a/vendor/github.com/hashicorp/nomad/helper/boltdd/boltdd.go b/vendor/github.com/hashicorp/nomad/helper/boltdd/boltdd.go deleted file mode 100644 index 5b492e09..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/boltdd/boltdd.go +++ /dev/null @@ -1,457 +0,0 @@ -// Package boltdd contains a wrapper around BBoltDB to deduplicate writes and encode -// values using mgspack. (dd stands for de-duplicate) -package boltdd - -import ( - "bytes" - "fmt" - "os" - "sync" - - "github.com/hashicorp/go-msgpack/codec" - "github.com/hashicorp/nomad/nomad/structs" - "go.etcd.io/bbolt" - "golang.org/x/crypto/blake2b" -) - -// ErrNotFound is returned when a key is not found. -type ErrNotFound struct { - name string -} - -func (e *ErrNotFound) Error() string { - return fmt.Sprintf("key not found: %s", e.name) -} - -// NotFound returns a new error for a key that was not found. -func NotFound(name string) error { - return &ErrNotFound{name} -} - -// IsErrNotFound returns true if the error is an ErrNotFound error. -func IsErrNotFound(e error) bool { - if e == nil { - return false - } - _, ok := e.(*ErrNotFound) - return ok -} - -// DB wraps an underlying bolt.DB to create write de-duplicating buckets and -// msgpack encoded values. -type DB struct { - rootBuckets map[string]*bucketMeta - rootBucketsLock sync.Mutex - - boltDB *bbolt.DB -} - -// Open a bolt.DB and wrap it in a write-de-duplicating msgpack-encoding -// implementation. -func Open(path string, mode os.FileMode, options *bbolt.Options) (*DB, error) { - bdb, err := bbolt.Open(path, mode, options) - if err != nil { - return nil, err - } - - return New(bdb), nil -} - -// New de-duplicating wrapper for the given bboltdb. -func New(bdb *bbolt.DB) *DB { - return &DB{ - rootBuckets: make(map[string]*bucketMeta), - boltDB: bdb, - } -} - -func (db *DB) bucket(btx *bbolt.Tx, name []byte) *Bucket { - bb := btx.Bucket(name) - if bb == nil { - return nil - } - - db.rootBucketsLock.Lock() - defer db.rootBucketsLock.Unlock() - - if db.isClosed() { - return nil - } - - b, ok := db.rootBuckets[string(name)] - if !ok { - b = newBucketMeta() - db.rootBuckets[string(name)] = b - } - - return newBucket(b, bb) -} - -func (db *DB) createBucket(btx *bbolt.Tx, name []byte) (*Bucket, error) { - bb, err := btx.CreateBucket(name) - if err != nil { - return nil, err - } - - db.rootBucketsLock.Lock() - defer db.rootBucketsLock.Unlock() - - // While creating a bucket on a closed db would error, we must recheck - // after acquiring the lock to avoid races. - if db.isClosed() { - return nil, bbolt.ErrDatabaseNotOpen - } - - // Always create a new Bucket since CreateBucket above fails if the - // bucket already exists. 
- b := newBucketMeta() - db.rootBuckets[string(name)] = b - - return newBucket(b, bb), nil -} - -func (db *DB) createBucketIfNotExists(btx *bbolt.Tx, name []byte) (*Bucket, error) { - bb, err := btx.CreateBucketIfNotExists(name) - if err != nil { - return nil, err - } - - db.rootBucketsLock.Lock() - defer db.rootBucketsLock.Unlock() - - // While creating a bucket on a closed db would error, we must recheck - // after acquiring the lock to avoid races. - if db.isClosed() { - return nil, bbolt.ErrDatabaseNotOpen - } - - b, ok := db.rootBuckets[string(name)] - if !ok { - b = newBucketMeta() - db.rootBuckets[string(name)] = b - } - - return newBucket(b, bb), nil -} - -func (db *DB) Update(fn func(*Tx) error) error { - return db.boltDB.Update(func(btx *bbolt.Tx) error { - tx := newTx(db, btx) - return fn(tx) - }) -} - -func (db *DB) Batch(fn func(*Tx) error) error { - return db.boltDB.Batch(func(btx *bbolt.Tx) error { - tx := newTx(db, btx) - return fn(tx) - }) -} - -func (db *DB) View(fn func(*Tx) error) error { - return db.boltDB.View(func(btx *bbolt.Tx) error { - tx := newTx(db, btx) - return fn(tx) - }) -} - -// isClosed returns true if the database is closed and must be called while -// db.rootBucketsLock is acquired. -func (db *DB) isClosed() bool { - return db.rootBuckets == nil -} - -// Close closes the underlying bolt.DB and clears all bucket hashes. DB is -// unusable after closing. -func (db *DB) Close() error { - db.rootBucketsLock.Lock() - db.rootBuckets = nil - db.rootBucketsLock.Unlock() - return db.boltDB.Close() -} - -// BoltDB returns the underlying bolt.DB. -func (db *DB) BoltDB() *bbolt.DB { - return db.boltDB -} - -type Tx struct { - db *DB - btx *bbolt.Tx -} - -func newTx(db *DB, btx *bbolt.Tx) *Tx { - return &Tx{ - db: db, - btx: btx, - } -} - -// Bucket returns a root bucket or nil if it doesn't exist. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.db.bucket(tx.btx, name) -} - -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.db.createBucket(tx.btx, name) -} - -// CreateBucketIfNotExists returns a root bucket or creates a new one if it -// doesn't already exist. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.db.createBucketIfNotExists(tx.btx, name) -} - -// Writable wraps boltdb Tx.Writable. -func (tx *Tx) Writable() bool { - return tx.btx.Writable() -} - -// BoltTx returns the underlying bolt.Tx. -func (tx *Tx) BoltTx() *bbolt.Tx { - return tx.btx -} - -// bucketMeta persists metadata -- such as key hashes and child buckets -- -// about boltdb Buckets across transactions. -type bucketMeta struct { - // hashes holds all of the value hashes for keys in this bucket - hashes map[string][]byte - hashesLock sync.Mutex - - // buckets holds all of the child buckets - buckets map[string]*bucketMeta - bucketsLock sync.Mutex -} - -func newBucketMeta() *bucketMeta { - return &bucketMeta{ - hashes: make(map[string][]byte), - buckets: make(map[string]*bucketMeta), - } -} - -// getHash of last value written to a key or nil if no hash exists. -func (bm *bucketMeta) getHash(hashKey string) []byte { - bm.hashesLock.Lock() - lastHash := bm.hashes[hashKey] - bm.hashesLock.Unlock() - return lastHash -} - -// setHash of last value written to key. -func (bm *bucketMeta) setHash(hashKey string, hashVal []byte) { - bm.hashesLock.Lock() - bm.hashes[hashKey] = hashVal - bm.hashesLock.Unlock() -} - -// delHash deletes a hash value or does nothing if the hash key does not exist. 
-func (bm *bucketMeta) delHash(hashKey string) { - bm.hashesLock.Lock() - delete(bm.hashes, hashKey) - bm.hashesLock.Unlock() -} - -// createBucket metadata entry for the given nested bucket. Overwrites any -// existing entry so caller should ensure bucket does not already exist. -func (bm *bucketMeta) createBucket(name []byte) *bucketMeta { - bm.bucketsLock.Lock() - defer bm.bucketsLock.Unlock() - - // Always create a new Bucket since CreateBucket above fails if the - // bucket already exists. - b := newBucketMeta() - bm.buckets[string(name)] = b - return b -} - -// deleteBucket metadata entry for the given nested bucket. Does nothing if -// nested bucket metadata does not exist. -func (bm *bucketMeta) deleteBucket(name []byte) { - bm.bucketsLock.Lock() - delete(bm.buckets, string(name)) - bm.bucketsLock.Unlock() - -} - -// getOrCreateBucket metadata entry for the given nested bucket. -func (bm *bucketMeta) getOrCreateBucket(name []byte) *bucketMeta { - bm.bucketsLock.Lock() - defer bm.bucketsLock.Unlock() - - b, ok := bm.buckets[string(name)] - if !ok { - b = newBucketMeta() - bm.buckets[string(name)] = b - } - return b -} - -type Bucket struct { - bm *bucketMeta - boltBucket *bbolt.Bucket -} - -// newBucket creates a new view into a bucket backed by a boltdb -// transaction. -func newBucket(b *bucketMeta, bb *bbolt.Bucket) *Bucket { - return &Bucket{ - bm: b, - boltBucket: bb, - } -} - -// Put into boltdb iff it has changed since the last write. -func (b *Bucket) Put(key []byte, val interface{}) error { - // buffer for writing serialized state to - var buf bytes.Buffer - - // Serialize the object - if err := codec.NewEncoder(&buf, structs.MsgpackHandle).Encode(val); err != nil { - return fmt.Errorf("failed to encode passed object: %v", err) - } - - // Hash for skipping unnecessary writes - hashKey := string(key) - hashVal := blake2b.Sum256(buf.Bytes()) - - // lastHash value or nil if it hasn't been hashed yet - lastHash := b.bm.getHash(hashKey) - - // If the hashes are equal, skip the write - if bytes.Equal(hashVal[:], lastHash) { - return nil - } - - // New value: write it to the underlying boltdb - if err := b.boltBucket.Put(key, buf.Bytes()); err != nil { - return fmt.Errorf("failed to write data at key %s: %v", key, err) - } - - // New value written, store hash (bucket path map was created above) - b.bm.setHash(hashKey, hashVal[:]) - - return nil - -} - -// Get value by key from boltdb or return an ErrNotFound error if key not -// found. -func (b *Bucket) Get(key []byte, obj interface{}) error { - // Get the raw data from the underlying boltdb - data := b.boltBucket.Get(key) - if data == nil { - return NotFound(string(key)) - } - - // Deserialize the object - if err := codec.NewDecoderBytes(data, structs.MsgpackHandle).Decode(obj); err != nil { - return fmt.Errorf("failed to decode data into passed object: %v", err) - } - - return nil -} - -// Iterate iterates each key in Bucket b that starts with prefix. fn is called on -// the key and msg-pack decoded value. If prefix is empty or nil, all keys in the -// bucket are iterated. -// -// b must already exist. 
-func Iterate[T any](b *Bucket, prefix []byte, fn func([]byte, T)) error { - c := b.boltBucket.Cursor() - for k, data := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, data = c.Next() { - var obj T - if err := codec.NewDecoderBytes(data, structs.MsgpackHandle).Decode(&obj); err != nil { - return fmt.Errorf("failed to decode data into passed object: %v", err) - } - fn(k, obj) - } - return nil -} - -// DeletePrefix removes all keys starting with prefix from the bucket. If no keys -// with prefix exist then nothing is done and a nil error is returned. Returns an -// error if the bucket was created from a read-only transaction. -// -// b must already exist. -func (b *Bucket) DeletePrefix(prefix []byte) error { - c := b.boltBucket.Cursor() - for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() { - if err := c.Delete(); err != nil { - return err - } - b.bm.delHash(string(k)) - } - return nil -} - -// Delete removes a key from the bucket. If the key does not exist then nothing -// is done and a nil error is returned. Returns an error if the bucket was -// created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - err := b.boltBucket.Delete(key) - b.bm.delHash(string(key)) - return err -} - -// Bucket represents a boltdb Bucket and its associated metadata necessary for -// write deduplication. Like bolt.Buckets it is only valid for the duration of -// the transaction that created it. -func (b *Bucket) Bucket(name []byte) *Bucket { - bb := b.boltBucket.Bucket(name) - if bb == nil { - return nil - } - - bmeta := b.bm.getOrCreateBucket(name) - return newBucket(bmeta, bb) -} - -// CreateBucket creates a new bucket at the given key and returns the new -// bucket. Returns an error if the key already exists, if the bucket name is -// blank, or if the bucket name is too long. The bucket instance is only valid -// for the lifetime of the transaction. -func (b *Bucket) CreateBucket(name []byte) (*Bucket, error) { - bb, err := b.boltBucket.CreateBucket(name) - if err != nil { - return nil, err - } - - bmeta := b.bm.createBucket(name) - return newBucket(bmeta, bb), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and -// returns a reference to it. The bucket instance is only valid for the -// lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - bb, err := b.boltBucket.CreateBucketIfNotExists(name) - if err != nil { - return nil, err - } - - bmeta := b.bm.getOrCreateBucket(name) - return newBucket(bmeta, bb), nil -} - -// DeleteBucket deletes a child bucket. Returns an error if the bucket -// corresponds to a non-bucket key or another error is encountered. No error is -// returned if the bucket does not exist. -func (b *Bucket) DeleteBucket(name []byte) error { - // Delete the bucket from the underlying boltdb - err := b.boltBucket.DeleteBucket(name) - if err == bbolt.ErrBucketNotFound { - err = nil - } - - // Remove reference to child bucket - b.bm.deleteBucket(name) - return err -} - -// BoltBucket returns the internal bolt.Bucket for this Bucket. Only valid -// for the duration of the current transaction. 
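A minimal usage sketch of the boltdd wrapper being dropped from the vendor tree here (file name, bucket name, and value type below are hypothetical, and error handling is trimmed): Put msgpack-encodes the value and hashes it with blake2b, so re-writing an identical value is skipped and never touches bbolt.

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/helper/boltdd"
)

// taskMeta is a made-up value type; any msgpack-encodable struct works.
type taskMeta struct {
	Name string
}

func main() {
	db, err := boltdd.Open("example-state.db", 0o600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	err = db.Update(func(tx *boltdd.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("example"))
		if err != nil {
			return err
		}
		// The first Put writes to bbolt; the second Put of an equal value
		// is skipped because its blake2b hash matches the stored hash.
		if err := bkt.Put([]byte("task"), &taskMeta{Name: "web"}); err != nil {
			return err
		}
		return bkt.Put([]byte("task"), &taskMeta{Name: "web"})
	})
	if err != nil {
		panic(err)
	}

	out := &taskMeta{}
	err = db.View(func(tx *boltdd.Tx) error {
		return tx.Bucket([]byte("example")).Get([]byte("task"), out)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // prints "web"
}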
-func (b *Bucket) BoltBucket() *bbolt.Bucket { - return b.boltBucket -} diff --git a/vendor/github.com/hashicorp/nomad/helper/bufconndialer/bufconndialer.go b/vendor/github.com/hashicorp/nomad/helper/bufconndialer/bufconndialer.go deleted file mode 100644 index 3985d1de..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/bufconndialer/bufconndialer.go +++ /dev/null @@ -1,48 +0,0 @@ -// BufConnWrapper implements consul-template's TransportDialer using a -// bufconn listener, to provide a way to Dial the in-memory listener -// -// Copied from github.com/hashicorp/vault/internalshared/listenerutil/bufconn.go - -package bufconndialer - -import ( - "context" - "net" - - "google.golang.org/grpc/test/bufconn" -) - -// BufConnWrapper implements consul-template's TransportDialer using a -// bufconn listener, to provide a way to Dial the in-memory listener -type BufConnWrapper struct { - listener *bufconn.Listener -} - -// New returns a new BufConnWrapper with a new bufconn.Listener. The wrapper -// provides a dialer for creating connections to the listener. -func New() (net.Listener, *BufConnWrapper) { - ln := bufconn.Listen(1024 * 1024) - return ln, &BufConnWrapper{listener: ln} -} - -// NewBufConnWrapper returns a new BufConnWrapper using an -// existing bufconn.Listener -func NewBufConnWrapper(bcl *bufconn.Listener) *BufConnWrapper { - return &BufConnWrapper{ - listener: bcl, - } -} - -// Dial connects to the listening end of the bufconn (satisfies -// consul-template's TransportDialer interface). This is essentially the client -// side of the bufconn connection. -func (bcl *BufConnWrapper) Dial(_, _ string) (net.Conn, error) { - return bcl.listener.Dial() -} - -// DialContext connects to the listening end of the bufconn (satisfies -// consul-template's TransportDialer interface). This is essentially the client -// side of the bufconn connection. -func (bcl *BufConnWrapper) DialContext(ctx context.Context, _, _ string) (net.Conn, error) { - return bcl.listener.DialContext(ctx) -} diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go index 7d8f1d3b..598dcda8 100644 --- a/vendor/github.com/hashicorp/nomad/helper/funcs.go +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -409,6 +409,17 @@ func ConvertSlice[A, B any](original []A, conversion func(a A) B) []B { return result } +// ConvertMap takes the input map and generates a new one using the supplied +// conversion function to convert the values. This is useful when converting one +// map to another using the same keys. +func ConvertMap[K comparable, A, B any](original map[K]A, conversion func(a A) B) map[K]B { + result := make(map[K]B, len(original)) + for k, a := range original { + result[k] = conversion(a) + } + return result +} + // IsMethodHTTP returns whether s is a known HTTP method, ignoring case. func IsMethodHTTP(s string) bool { switch strings.ToUpper(s) { diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/api_versions.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/api_versions.go deleted file mode 100644 index c35db68a..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/api_versions.go +++ /dev/null @@ -1,16 +0,0 @@ -package loader - -import ( - "github.com/hashicorp/nomad/plugins/base" - "github.com/hashicorp/nomad/plugins/device" - "github.com/hashicorp/nomad/plugins/drivers" -) - -var ( - // AgentSupportedApiVersions is the set of API versions supported by the - // Nomad agent by plugin type. 
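A short sketch of the new generic ConvertMap helper added to helper/funcs.go above (the wrapper function and values are hypothetical): keys are kept as-is and only the values are converted.

// Sketch only; assumes "strconv" and "github.com/hashicorp/nomad/helper" are imported.
func replicaLabels(counts map[string]int) map[string]string {
	// Same keys; each int value converted with strconv.Itoa.
	return helper.ConvertMap(counts, strconv.Itoa)
}

// replicaLabels(map[string]int{"web": 3, "db": 1}) == map[string]string{"web": "3", "db": "1"}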
- AgentSupportedApiVersions = map[string][]string{ - base.PluginTypeDevice: {device.ApiVersion010}, - base.PluginTypeDriver: {drivers.ApiVersion010}, - } -) diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_unix.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_unix.go deleted file mode 100644 index 5ef82140..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows -// +build !windows - -package loader - -import "os" - -// executable Checks to see if the file is executable by anyone. -func executable(path string, f os.FileInfo) bool { - return f.Mode().Perm()&0111 != 0 -} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_windows.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_windows.go deleted file mode 100644 index 021e1cc2..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/filter_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build windows -// +build windows - -package loader - -import ( - "os" - "path/filepath" -) - -// On windows, an executable can be any file with any extension. To avoid -// introspecting the file, here we skip executability checks on windows systems -// and simply check for the convention of an `.exe` extension. -func executable(path string, s os.FileInfo) bool { - return filepath.Ext(path) == ".exe" -} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/init.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/init.go deleted file mode 100644 index 1cda5b15..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/init.go +++ /dev/null @@ -1,502 +0,0 @@ -package loader - -import ( - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "sort" - - multierror "github.com/hashicorp/go-multierror" - plugin "github.com/hashicorp/go-plugin" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/nomad/helper/pluginutils/hclspecutils" - "github.com/hashicorp/nomad/helper/pluginutils/hclutils" - "github.com/hashicorp/nomad/nomad/structs/config" - "github.com/hashicorp/nomad/plugins/base" - "github.com/zclconf/go-cty/cty/msgpack" -) - -// validateConfig returns whether or not the configuration is valid -func validateConfig(config *PluginLoaderConfig) error { - var mErr multierror.Error - if config == nil { - return fmt.Errorf("nil config passed") - } else if config.Logger == nil { - _ = multierror.Append(&mErr, fmt.Errorf("nil logger passed")) - } - - // Validate that all plugins have a binary name - for _, c := range config.Configs { - if c.Name == "" { - _ = multierror.Append(&mErr, fmt.Errorf("plugin config passed without binary name")) - } - } - - // Validate internal plugins - for k, config := range config.InternalPlugins { - // Validate config - if config == nil { - _ = multierror.Append(&mErr, fmt.Errorf("nil config passed for internal plugin %s", k)) - continue - } else if config.Factory == nil { - _ = multierror.Append(&mErr, fmt.Errorf("nil factory passed for internal plugin %s", k)) - continue - } - } - - return mErr.ErrorOrNil() -} - -// init initializes the plugin loader by compiling both internal and external -// plugins and selecting the highest versioned version of any given plugin. 
-func (l *PluginLoader) init(config *PluginLoaderConfig) error { - // Create a mapping of name to config - configMap := configMap(config.Configs) - - // Initialize the internal plugins - internal, err := l.initInternal(config.InternalPlugins, configMap) - if err != nil { - return fmt.Errorf("failed to fingerprint internal plugins: %v", err) - } - - // Scan for eligibile binaries - plugins, err := l.scan() - if err != nil { - return fmt.Errorf("failed to scan plugin directory %q: %v", l.pluginDir, err) - } - - // Fingerprint the passed plugins - external, err := l.fingerprintPlugins(plugins, configMap) - if err != nil { - return fmt.Errorf("failed to fingerprint plugins: %v", err) - } - - // Merge external and internal plugins - l.plugins = l.mergePlugins(internal, external) - - // Validate that the configs are valid for the plugins - if err := l.validatePluginConfigs(); err != nil { - return fmt.Errorf("parsing plugin configurations failed: %v", err) - } - - return nil -} - -// initInternal initializes internal plugins. -func (l *PluginLoader) initInternal(plugins map[PluginID]*InternalPluginConfig, configs map[string]*config.PluginConfig) (map[PluginID]*pluginInfo, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var mErr multierror.Error - fingerprinted := make(map[PluginID]*pluginInfo, len(plugins)) - for k, config := range plugins { - // Create an instance - raw := config.Factory(ctx, l.logger) - base, ok := raw.(base.BasePlugin) - if !ok { - _ = multierror.Append(&mErr, fmt.Errorf("internal plugin %s doesn't meet base plugin interface", k)) - continue - } - - info := &pluginInfo{ - factory: config.Factory, - config: config.Config, - } - - // Try to retrieve a user specified config - if userConfig, ok := configs[k.Name]; ok && userConfig.Config != nil { - info.config = userConfig.Config - } - - // Fingerprint base info - i, err := base.PluginInfo() - if err != nil { - _ = multierror.Append(&mErr, fmt.Errorf("PluginInfo info failed for internal plugin %s: %v", k, err)) - continue - } - info.baseInfo = i - - // Parse and set the plugin version - v, err := version.NewVersion(i.PluginVersion) - if err != nil { - _ = multierror.Append(&mErr, fmt.Errorf("failed to parse version %q for internal plugin %s: %v", i.PluginVersion, k, err)) - continue - } - info.version = v - - // Detect the plugin API version to use - av, err := l.selectApiVersion(i) - if err != nil { - _ = multierror.Append(&mErr, fmt.Errorf("failed to validate API versions %v for internal plugin %s: %v", i.PluginApiVersions, k, err)) - continue - } - if av == "" { - l.logger.Warn("skipping plugin because supported API versions for plugin and Nomad do not overlap", "plugin", k) - continue - } - info.apiVersion = av - - // Get the config schema - schema, err := base.ConfigSchema() - if err != nil { - _ = multierror.Append(&mErr, fmt.Errorf("failed to retrieve config schema for internal plugin %s: %v", k, err)) - continue - } - info.configSchema = schema - - // Store the fingerprinted config - fingerprinted[k] = info - } - - if err := mErr.ErrorOrNil(); err != nil { - return nil, err - } - - return fingerprinted, nil -} - -// selectApiVersion takes in PluginInfo and returns the highest compatable -// version or an error if the plugins response is malformed. If there is no -// overlap, an empty string is returned. 
-func (l *PluginLoader) selectApiVersion(i *base.PluginInfoResponse) (string, error) { - if i == nil { - return "", fmt.Errorf("nil plugin info given") - } - if len(i.PluginApiVersions) == 0 { - return "", fmt.Errorf("plugin provided no compatible API versions") - } - - pluginVersions, err := convertVersions(i.PluginApiVersions) - if err != nil { - return "", fmt.Errorf("plugin provided invalid versions: %v", err) - } - - // Lookup the supported versions. These will be sorted highest to lowest - supportedVersions, ok := l.supportedVersions[i.Type] - if !ok { - return "", fmt.Errorf("unsupported plugin type %q", i.Type) - } - - for _, sv := range supportedVersions { - for _, pv := range pluginVersions { - if sv.Equal(pv) { - return pv.Original(), nil - } - } - } - - return "", nil -} - -// convertVersions takes a list of string versions and returns a sorted list of -// versions from highest to lowest. -func convertVersions(in []string) ([]*version.Version, error) { - converted := make([]*version.Version, len(in)) - for i, v := range in { - vv, err := version.NewVersion(v) - if err != nil { - return nil, fmt.Errorf("failed to convert version %q : %v", v, err) - } - - converted[i] = vv - } - - sort.Slice(converted, func(i, j int) bool { - return converted[i].GreaterThan(converted[j]) - }) - - return converted, nil -} - -// scan scans the plugin directory and retrieves potentially eligible binaries -func (l *PluginLoader) scan() ([]os.FileInfo, error) { - if l.pluginDir == "" { - return nil, nil - } - - // Capture the list of binaries in the plugins folder - f, err := os.Open(l.pluginDir) - if err != nil { - // There are no plugins to scan - if os.IsNotExist(err) { - l.logger.Warn("skipping external plugins since plugin_dir doesn't exist") - return nil, nil - } - - return nil, fmt.Errorf("failed to open plugin directory %q: %v", l.pluginDir, err) - } - files, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, fmt.Errorf("failed to read plugin directory %q: %v", l.pluginDir, err) - } - - var plugins []os.FileInfo - for _, f := range files { - f = filepath.Join(l.pluginDir, f) - s, err := os.Stat(f) - if err != nil { - return nil, fmt.Errorf("failed to stat file %q: %v", f, err) - } - if s.IsDir() { - l.logger.Warn("skipping subdir in plugin folder", "subdir", f) - continue - } - - if !executable(f, s) { - l.logger.Warn("skipping un-executable file in plugin folder", "file", f) - continue - } - plugins = append(plugins, s) - } - - return plugins, nil -} - -// fingerprintPlugins fingerprints all external plugin binaries -func (l *PluginLoader) fingerprintPlugins(plugins []os.FileInfo, configs map[string]*config.PluginConfig) (map[PluginID]*pluginInfo, error) { - var mErr multierror.Error - fingerprinted := make(map[PluginID]*pluginInfo, len(plugins)) - for _, p := range plugins { - name := cleanPluginExecutable(p.Name()) - c := configs[name] - info, err := l.fingerprintPlugin(p, c) - if err != nil { - l.logger.Error("failed to fingerprint plugin", "plugin", name, "error", err) - _ = multierror.Append(&mErr, err) - continue - } - if info == nil { - // Plugin was skipped for validation reasons - continue - } - - id := PluginID{ - Name: info.baseInfo.Name, - PluginType: info.baseInfo.Type, - } - - // Detect if we already have seen a version of this plugin - if prev, ok := fingerprinted[id]; ok { - oldVersion := prev.version - selectedVersion := info.version - skip := false - - // Determine if we should keep the previous version or override - if 
prev.version.GreaterThan(info.version) { - oldVersion = info.version - selectedVersion = prev.version - skip = true - } - l.logger.Info("multiple versions of plugin detected", - "plugin", info.baseInfo.Name, "older_version", oldVersion, "selected_version", selectedVersion) - - if skip { - continue - } - } - - // Add the plugin - fingerprinted[id] = info - } - - if err := mErr.ErrorOrNil(); err != nil { - return nil, err - } - - return fingerprinted, nil -} - -// fingerprintPlugin fingerprints the passed external plugin -func (l *PluginLoader) fingerprintPlugin(pluginExe os.FileInfo, config *config.PluginConfig) (*pluginInfo, error) { - info := &pluginInfo{ - exePath: filepath.Join(l.pluginDir, pluginExe.Name()), - } - - // Build the command - cmd := exec.Command(info.exePath) - if config != nil { - cmd.Args = append(cmd.Args, config.Args...) - info.args = config.Args - info.config = config.Config - } - - // Launch the plugin - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: base.Handshake, - Plugins: map[string]plugin.Plugin{ - base.PluginTypeBase: &base.PluginBase{}, - }, - Cmd: cmd, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - Logger: l.logger, - }) - defer client.Kill() - - // Connect via RPC - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - // Request the plugin - raw, err := rpcClient.Dispense(base.PluginTypeBase) - if err != nil { - return nil, err - } - - // Cast the plugin to the base type - bplugin := raw.(base.BasePlugin) - - // Retrieve base plugin information - i, err := bplugin.PluginInfo() - if err != nil { - return nil, fmt.Errorf("failed to get plugin info for plugin %q: %v", info.exePath, err) - } - info.baseInfo = i - - // Parse and set the plugin version - v, err := version.NewVersion(i.PluginVersion) - if err != nil { - return nil, fmt.Errorf("failed to parse plugin %q (%v) version %q: %v", - i.Name, info.exePath, i.PluginVersion, err) - } - info.version = v - - // Detect the plugin API version to use - av, err := l.selectApiVersion(i) - if err != nil { - return nil, fmt.Errorf("failed to validate API versions %v for plugin %s (%v): %v", i.PluginApiVersions, i.Name, info.exePath, err) - } - if av == "" { - l.logger.Warn("skipping plugin because supported API versions for plugin and Nomad do not overlap", "plugin", i.Name, "path", info.exePath) - return nil, nil - } - info.apiVersion = av - - // Retrieve the schema - schema, err := bplugin.ConfigSchema() - if err != nil { - return nil, fmt.Errorf("failed to get plugin config schema for plugin %q: %v", info.exePath, err) - } - info.configSchema = schema - - return info, nil -} - -// mergePlugins merges internal and external plugins, preferring the highest -// version. 
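A hedged illustration of the ordering convertVersions (above) establishes, with made-up inputs: hashicorp/go-version parses each string and GreaterThan yields a highest-first sort, which is why selectApiVersion can return the first overlap as the newest version supported by both the plugin and the agent.

// Sketch only; mirrors convertVersions above. Assumes "sort" and
// "github.com/hashicorp/go-version" are imported.
raw := []string{"0.1.0", "0.2.1", "0.2.0"}
vs := make([]*version.Version, len(raw))
for i, s := range raw {
	v, err := version.NewVersion(s)
	if err != nil {
		panic(err)
	}
	vs[i] = v
}
sort.Slice(vs, func(i, j int) bool { return vs[i].GreaterThan(vs[j]) })
// vs[0].Original() == "0.2.1" (highest version first)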
-func (l *PluginLoader) mergePlugins(internal, external map[PluginID]*pluginInfo) map[PluginID]*pluginInfo { - finalized := make(map[PluginID]*pluginInfo, len(internal)) - - // Load the internal plugins - for k, v := range internal { - finalized[k] = v - } - - for k, extPlugin := range external { - internal, ok := finalized[k] - if ok { - // We have overlapping plugins, determine if we should keep the - // internal version or override - if extPlugin.version.LessThan(internal.version) { - l.logger.Info("preferring internal version of plugin", - "plugin", extPlugin.baseInfo.Name, "internal_version", internal.version, - "external_version", extPlugin.version) - continue - } - } - - // Add external plugin - finalized[k] = extPlugin - } - - return finalized -} - -// validatePluginConfigs is used to validate each plugins' configuration. If the -// plugin has a config, it is parsed with the plugins config schema and -// SetConfig is called to ensure the config is valid. -func (l *PluginLoader) validatePluginConfigs() error { - var mErr multierror.Error - for id, info := range l.plugins { - if err := l.validatePluginConfig(id, info); err != nil { - wrapped := multierror.Prefix(err, fmt.Sprintf("plugin %s:", id)) - _ = multierror.Append(&mErr, wrapped) - } - } - - return mErr.ErrorOrNil() -} - -// validatePluginConfig is used to validate the plugin's configuration. If the -// plugin has a config, it is parsed with the plugins config schema and -// SetConfig is called to ensure the config is valid. -func (l *PluginLoader) validatePluginConfig(id PluginID, info *pluginInfo) error { - var mErr multierror.Error - - // Check if a config is allowed - if info.configSchema == nil { - if info.config != nil { - return fmt.Errorf("configuration not allowed but config passed") - } - - // Nothing to do - return nil - } - - // Convert the schema to hcl - spec, diag := hclspecutils.Convert(info.configSchema) - if diag.HasErrors() { - _ = multierror.Append(&mErr, diag.Errs()...) - return multierror.Prefix(&mErr, "failed converting config schema:") - } - - // If there is no config, initialize it to an empty map so we can still - // handle defaults - if info.config == nil { - info.config = map[string]interface{}{} - } - - // Parse the config using the spec - val, diag, diagErrs := hclutils.ParseHclInterface(info.config, spec, nil) - if diag.HasErrors() { - _ = multierror.Append(&mErr, diagErrs...) 
- return multierror.Prefix(&mErr, "failed to parse config: ") - - } - - // Marshal the value - cdata, err := msgpack.Marshal(val, val.Type()) - if err != nil { - return fmt.Errorf("failed to msgpack encode config: %v", err) - } - - // Store the marshalled config - info.msgpackConfig = cdata - - // Dispense the plugin and set its config and ensure it is error free - instance, err := l.Dispense(id.Name, id.PluginType, nil, l.logger) - if err != nil { - return fmt.Errorf("failed to dispense plugin: %v", err) - } - defer instance.Kill() - - b, ok := instance.Plugin().(base.BasePlugin) - if !ok { - return fmt.Errorf("dispensed plugin %s doesn't meet base plugin interface", id) - } - - c := &base.Config{ - PluginConfig: cdata, - AgentConfig: nil, - ApiVersion: info.apiVersion, - } - - if err := b.SetConfig(c); err != nil { - return fmt.Errorf("setting config on plugin failed: %v", err) - } - return nil -} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/instance.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/instance.go deleted file mode 100644 index e396eb45..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/instance.go +++ /dev/null @@ -1,63 +0,0 @@ -package loader - -import plugin "github.com/hashicorp/go-plugin" - -// PluginInstance wraps an instance of a plugin. If the plugin is external, it -// provides methods to retrieve the ReattachConfig and to kill the plugin. -type PluginInstance interface { - // Internal returns if the plugin is internal - Internal() bool - - // Kill kills the plugin if it is external. It is safe to call on internal - // plugins. - Kill() - - // ReattachConfig returns the ReattachConfig and whether the plugin is internal - // or not. If the second return value is false, no ReattachConfig is - // possible to return. - ReattachConfig() (config *plugin.ReattachConfig, canReattach bool) - - // Plugin returns the wrapped plugin instance. 
- Plugin() interface{} - - // Exited returns whether the plugin has exited - Exited() bool - - // ApiVersion returns the API version to be used with the plugin - ApiVersion() string -} - -// internalPluginInstance wraps an internal plugin -type internalPluginInstance struct { - instance interface{} - apiVersion string - killFn func() -} - -func (p *internalPluginInstance) Internal() bool { return true } -func (p *internalPluginInstance) Kill() { p.killFn() } - -func (p *internalPluginInstance) ReattachConfig() (*plugin.ReattachConfig, bool) { return nil, false } -func (p *internalPluginInstance) Plugin() interface{} { return p.instance } -func (p *internalPluginInstance) Exited() bool { return false } -func (p *internalPluginInstance) ApiVersion() string { return p.apiVersion } - -// externalPluginInstance wraps an external plugin -type externalPluginInstance struct { - client *plugin.Client - instance interface{} - apiVersion string -} - -func (p *externalPluginInstance) Internal() bool { return false } -func (p *externalPluginInstance) Plugin() interface{} { return p.instance } -func (p *externalPluginInstance) Exited() bool { return p.client.Exited() } -func (p *externalPluginInstance) ApiVersion() string { return p.apiVersion } - -func (p *externalPluginInstance) ReattachConfig() (*plugin.ReattachConfig, bool) { - return p.client.ReattachConfig(), true -} - -func (p *externalPluginInstance) Kill() { - p.client.Kill() -} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/loader.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/loader.go deleted file mode 100644 index 01644cb1..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/loader.go +++ /dev/null @@ -1,287 +0,0 @@ -package loader - -import ( - "context" - "fmt" - "os/exec" - - log "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - version "github.com/hashicorp/go-version" - "github.com/hashicorp/nomad/nomad/structs/config" - "github.com/hashicorp/nomad/plugins" - "github.com/hashicorp/nomad/plugins/base" - "github.com/hashicorp/nomad/plugins/device" - "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/shared/hclspec" -) - -// PluginCatalog is used to retrieve plugins, either external or internal -type PluginCatalog interface { - // Dispense returns the plugin given its name and type. This will also - // configure the plugin - Dispense(name, pluginType string, config *base.AgentConfig, logger log.Logger) (PluginInstance, error) - - // Reattach is used to reattach to a previously launched external plugin. - Reattach(name, pluginType string, config *plugin.ReattachConfig) (PluginInstance, error) - - // Catalog returns the catalog of all plugins keyed by plugin type - Catalog() map[string][]*base.PluginInfoResponse -} - -// InternalPluginConfig is used to configure launching an internal plugin. -type InternalPluginConfig struct { - Config map[string]interface{} - Factory plugins.PluginCtxFactory -} - -// PluginID is a tuple identifying a plugin -type PluginID struct { - // Name is the name of the plugin - Name string - - // PluginType is the plugin's type - PluginType string -} - -// String returns a friendly representation of the plugin. -func (id PluginID) String() string { - return fmt.Sprintf("%q (%v)", id.Name, id.PluginType) -} - -func PluginInfoID(resp *base.PluginInfoResponse) PluginID { - return PluginID{ - Name: resp.Name, - PluginType: resp.Type, - } -} - -// PluginLoaderConfig configures a plugin loader. 
-type PluginLoaderConfig struct { - // Logger is the logger used by the plugin loader - Logger log.Logger - - // PluginDir is the directory scanned for loading plugins - PluginDir string - - // Configs is an optional set of configs for plugins - Configs []*config.PluginConfig - - // InternalPlugins allows registering internal plugins. - InternalPlugins map[PluginID]*InternalPluginConfig - - // SupportedVersions is a mapping of plugin type to the supported versions - SupportedVersions map[string][]string -} - -// PluginLoader is used to retrieve plugins either externally or from internal -// factories. -type PluginLoader struct { - // logger is the plugin loaders logger - logger log.Logger - - // supportedVersions is a mapping of plugin type to the supported versions - supportedVersions map[string][]*version.Version - - // pluginDir is the directory containing plugin binaries - pluginDir string - - // plugins maps a plugin to information required to launch it - plugins map[PluginID]*pluginInfo -} - -// pluginInfo captures the necessary information to launch and configure a -// plugin. -type pluginInfo struct { - factory plugins.PluginCtxFactory - - exePath string - args []string - - baseInfo *base.PluginInfoResponse - version *version.Version - apiVersion string - - configSchema *hclspec.Spec - config map[string]interface{} - msgpackConfig []byte -} - -// NewPluginLoader returns an instance of a plugin loader or an error if the -// plugins could not be loaded -func NewPluginLoader(config *PluginLoaderConfig) (*PluginLoader, error) { - if err := validateConfig(config); err != nil { - return nil, fmt.Errorf("invalid plugin loader configuration passed: %v", err) - } - - // Convert the versions - supportedVersions := make(map[string][]*version.Version, len(config.SupportedVersions)) - for pType, versions := range config.SupportedVersions { - converted, err := convertVersions(versions) - if err != nil { - return nil, err - } - supportedVersions[pType] = converted - } - - logger := config.Logger.Named("plugin_loader").With("plugin_dir", config.PluginDir) - l := &PluginLoader{ - logger: logger, - supportedVersions: supportedVersions, - pluginDir: config.PluginDir, - plugins: make(map[PluginID]*pluginInfo), - } - - if err := l.init(config); err != nil { - return nil, fmt.Errorf("failed to initialize plugin loader: %v", err) - } - - return l, nil -} - -// Dispense returns a plugin instance, loading it either internally or by -// launching an external plugin. 
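Taken together, PluginLoaderConfig, NewPluginLoader, and the Dispense method below form the public surface of this package. A minimal sketch, not taken from the vendored code, of how an agent-like caller might wire them up; the plugin directory, plugin name, and supported version are illustrative only:

package main

import (
	"fmt"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/helper/pluginutils/loader"
	"github.com/hashicorp/nomad/plugins/base"
)

func main() {
	logger := hclog.Default()

	// Hypothetical supported-version table and plugin directory; a real agent
	// derives these from its own configuration rather than hard-coding them.
	cfg := &loader.PluginLoaderConfig{
		Logger:            logger,
		PluginDir:         "/opt/nomad/plugins",
		SupportedVersions: map[string][]string{base.PluginTypeDriver: {"0.1.0"}},
	}

	l, err := loader.NewPluginLoader(cfg)
	if err != nil {
		fmt.Println("load:", err)
		return
	}

	// Dispense launches the plugin (or invokes the factory, for internal
	// plugins) and pushes its msgpack-encoded config via SetConfig, as shown
	// in the Dispense method that follows. With the illustrative directory
	// above this will typically return an "unknown plugin" error.
	inst, err := l.Dispense("raw_exec", base.PluginTypeDriver, nil, logger)
	if err != nil {
		fmt.Println("dispense:", err)
		return
	}
	defer inst.Kill()

	fmt.Println("internal plugin:", inst.Internal(), "api:", inst.ApiVersion())
}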
-func (l *PluginLoader) Dispense(name, pluginType string, config *base.AgentConfig, logger log.Logger) (PluginInstance, error) { - id := PluginID{ - Name: name, - PluginType: pluginType, - } - pinfo, ok := l.plugins[id] - if !ok { - return nil, fmt.Errorf("unknown plugin with name %q and type %q", name, pluginType) - } - - // If the plugin is internal, launch via the factory - var instance PluginInstance - if pinfo.factory != nil { - ctx, cancel := context.WithCancel(context.Background()) - instance = &internalPluginInstance{ - instance: pinfo.factory(ctx, logger), - apiVersion: pinfo.apiVersion, - killFn: cancel, - } - } else { - var err error - instance, err = l.dispensePlugin(pinfo.baseInfo.Type, pinfo.apiVersion, pinfo.exePath, pinfo.args, nil, logger) - if err != nil { - return nil, fmt.Errorf("failed to launch plugin: %v", err) - } - } - - // Cast to the base type and set the config - b, ok := instance.Plugin().(base.BasePlugin) - if !ok { - return nil, fmt.Errorf("plugin %s doesn't implement base plugin interface", id) - } - - c := &base.Config{ - PluginConfig: pinfo.msgpackConfig, - AgentConfig: config, - ApiVersion: pinfo.apiVersion, - } - - if err := b.SetConfig(c); err != nil { - return nil, fmt.Errorf("setting config for plugin %s failed: %v", id, err) - } - - return instance, nil -} - -// Reattach reattaches to a previously launched external plugin. -func (l *PluginLoader) Reattach(name, pluginType string, config *plugin.ReattachConfig) (PluginInstance, error) { - return l.dispensePlugin(pluginType, "", "", nil, config, l.logger) -} - -// dispensePlugin is used to launch or reattach to an external plugin. -func (l *PluginLoader) dispensePlugin( - pluginType, apiVersion, cmd string, args []string, reattach *plugin.ReattachConfig, - logger log.Logger) (PluginInstance, error) { - - var pluginCmd *exec.Cmd - if cmd != "" && reattach != nil { - return nil, fmt.Errorf("both launch command and reattach config specified") - } else if cmd == "" && reattach == nil { - return nil, fmt.Errorf("one of launch command or reattach config must be specified") - } else if cmd != "" { - pluginCmd = exec.Command(cmd, args...) 
- } - - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: base.Handshake, - Plugins: getPluginMap(pluginType, logger), - Cmd: pluginCmd, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - Logger: logger, - Reattach: reattach, - }) - - // Connect via RPC - rpcClient, err := client.Client() - if err != nil { - client.Kill() - return nil, err - } - - // Request the plugin - raw, err := rpcClient.Dispense(pluginType) - if err != nil { - client.Kill() - return nil, err - } - - instance := &externalPluginInstance{ - client: client, - instance: raw, - } - - if apiVersion != "" { - instance.apiVersion = apiVersion - } else { - // We do not know the API version since we are reattaching, so discover - // it - bplugin := raw.(base.BasePlugin) - - // Retrieve base plugin information - i, err := bplugin.PluginInfo() - if err != nil { - return nil, fmt.Errorf("failed to get plugin info for plugin: %v", err) - } - - apiVersion, err := l.selectApiVersion(i) - if err != nil { - return nil, fmt.Errorf("failed to validate API versions %v for plugin %s: %v", i.PluginApiVersions, i.Name, err) - } - if apiVersion == "" { - return nil, fmt.Errorf("failed to reattach to plugin because supported API versions for the plugin and Nomad do not overlap") - } - - instance.apiVersion = apiVersion - } - - return instance, nil -} - -// getPluginMap returns a plugin map based on the type of plugin being launched. -func getPluginMap(pluginType string, logger log.Logger) map[string]plugin.Plugin { - pmap := map[string]plugin.Plugin{ - base.PluginTypeBase: &base.PluginBase{}, - } - - switch pluginType { - case base.PluginTypeDevice: - pmap[base.PluginTypeDevice] = &device.PluginDevice{} - case base.PluginTypeDriver: - pmap[base.PluginTypeDriver] = drivers.NewDriverPlugin(nil, logger) - } - - return pmap -} - -// Catalog returns the catalog of all plugins -func (l *PluginLoader) Catalog() map[string][]*base.PluginInfoResponse { - c := make(map[string][]*base.PluginInfoResponse, 3) - for id, info := range l.plugins { - c[id.PluginType] = append(c[id.PluginType], info.baseInfo) - } - return c -} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/testing.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/testing.go deleted file mode 100644 index 75d2432f..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/testing.go +++ /dev/null @@ -1,87 +0,0 @@ -package loader - -import ( - "net" - "sync" - - log "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/hashicorp/nomad/plugins/base" -) - -// MockCatalog provides a mock PluginCatalog to be used for testing -type MockCatalog struct { - DispenseF func(name, pluginType string, cfg *base.AgentConfig, logger log.Logger) (PluginInstance, error) - ReattachF func(name, pluginType string, config *plugin.ReattachConfig) (PluginInstance, error) - CatalogF func() map[string][]*base.PluginInfoResponse -} - -func (m *MockCatalog) Dispense(name, pluginType string, cfg *base.AgentConfig, logger log.Logger) (PluginInstance, error) { - return m.DispenseF(name, pluginType, cfg, logger) -} - -func (m *MockCatalog) Reattach(name, pluginType string, config *plugin.ReattachConfig) (PluginInstance, error) { - return m.ReattachF(name, pluginType, config) -} - -func (m *MockCatalog) Catalog() map[string][]*base.PluginInfoResponse { - return m.CatalogF() -} - -// MockInstance provides a mock PluginInstance to be used for testing -type 
MockInstance struct { - InternalPlugin bool - KillF func() - ReattachConfigF func() (*plugin.ReattachConfig, bool) - PluginF func() interface{} - ExitedF func() bool - ApiVersionF func() string -} - -func (m *MockInstance) Internal() bool { return m.InternalPlugin } -func (m *MockInstance) Kill() { m.KillF() } -func (m *MockInstance) ReattachConfig() (*plugin.ReattachConfig, bool) { return m.ReattachConfigF() } -func (m *MockInstance) Plugin() interface{} { return m.PluginF() } -func (m *MockInstance) Exited() bool { return m.ExitedF() } -func (m *MockInstance) ApiVersion() string { return m.ApiVersionF() } - -// MockBasicExternalPlugin returns a MockInstance that simulates an external -// plugin returning it has been exited after kill is called. It returns the -// passed inst as the plugin -func MockBasicExternalPlugin(inst interface{}, apiVersion string) *MockInstance { - var killedLock sync.Mutex - killed := pointer.Of(false) - return &MockInstance{ - InternalPlugin: false, - KillF: func() { - killedLock.Lock() - defer killedLock.Unlock() - *killed = true - }, - - ReattachConfigF: func() (*plugin.ReattachConfig, bool) { - return &plugin.ReattachConfig{ - Protocol: "tcp", - Addr: &net.TCPAddr{ - IP: net.IPv4(127, 0, 0, 1), - Port: 3200, - Zone: "", - }, - Pid: 1000, - }, true - }, - - PluginF: func() interface{} { - return inst - }, - - ExitedF: func() bool { - killedLock.Lock() - defer killedLock.Unlock() - return *killed - }, - - ApiVersionF: func() string { return apiVersion }, - } -} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/util.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/util.go deleted file mode 100644 index 6409b95d..00000000 --- a/vendor/github.com/hashicorp/nomad/helper/pluginutils/loader/util.go +++ /dev/null @@ -1,26 +0,0 @@ -package loader - -import ( - "strings" - - "github.com/hashicorp/nomad/nomad/structs/config" -) - -// configMap returns a mapping of plugin binary name to config. 
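The MockCatalog and MockBasicExternalPlugin helpers above let tests stand in for a PluginCatalog without launching a real subprocess; the mock external instance only reports Exited() after Kill() has been called. A minimal test sketch under that assumption, not taken from the vendored code (the plugin name and API version are illustrative), ahead of the small utilities that follow:

package loader_test

import (
	"testing"

	hclog "github.com/hashicorp/go-hclog"
	plugin "github.com/hashicorp/go-plugin"
	"github.com/hashicorp/nomad/helper/pluginutils/loader"
	"github.com/hashicorp/nomad/plugins/base"
)

func TestMockCatalog(t *testing.T) {
	// Wrap an arbitrary value as a fake external plugin instance.
	inst := loader.MockBasicExternalPlugin(struct{}{}, "v0.1.0")

	cat := &loader.MockCatalog{
		DispenseF: func(name, pluginType string, _ *base.AgentConfig, _ hclog.Logger) (loader.PluginInstance, error) {
			return inst, nil
		},
		ReattachF: func(name, pluginType string, _ *plugin.ReattachConfig) (loader.PluginInstance, error) {
			return inst, nil
		},
		CatalogF: func() map[string][]*base.PluginInfoResponse { return nil },
	}

	got, err := cat.Dispense("example", base.PluginTypeDriver, nil, hclog.NewNullLogger())
	if err != nil {
		t.Fatalf("dispense: %v", err)
	}
	if got.Exited() {
		t.Fatal("mock plugin should not report exited before Kill")
	}
	got.Kill()
	if !got.Exited() {
		t.Fatal("Kill should mark the mock plugin as exited")
	}
}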
-func configMap(configs []*config.PluginConfig) map[string]*config.PluginConfig { - pluginMapping := make(map[string]*config.PluginConfig, len(configs)) - for _, c := range configs { - pluginMapping[c.Name] = c - } - return pluginMapping -} - -// cleanPluginExecutable strips the executable name of common suffixes -func cleanPluginExecutable(name string) string { - switch { - case strings.HasSuffix(name, ".exe"): - return strings.TrimSuffix(name, ".exe") - default: - return name - } -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go b/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go index fe99ee73..8e66a26b 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go +++ b/vendor/github.com/hashicorp/nomad/nomad/mock/alloc.go @@ -83,6 +83,36 @@ func Alloc() *structs.Allocation { return alloc } +func MinAlloc() *structs.Allocation { + job := MinJob() + group := job.TaskGroups[0] + task := group.Tasks[0] + return &structs.Allocation{ + ID: uuid.Generate(), + EvalID: uuid.Generate(), + NodeID: uuid.Generate(), + Job: job, + TaskGroup: group.Name, + ClientStatus: structs.AllocClientStatusPending, + DesiredStatus: structs.AllocDesiredStatusRun, + AllocatedResources: &structs.AllocatedResources{ + Tasks: map[string]*structs.AllocatedTaskResources{ + task.Name: { + Cpu: structs.AllocatedCpuResources{ + CpuShares: 100, + }, + Memory: structs.AllocatedMemoryResources{ + MemoryMB: 256, + }, + }, + }, + Shared: structs.AllocatedSharedResources{ + DiskMB: 150, + }, + }, + } +} + func AllocWithoutReservedPort() *structs.Allocation { alloc := Alloc() alloc.Resources.Networks[0].ReservedPorts = nil diff --git a/vendor/github.com/hashicorp/nomad/nomad/mock/job.go b/vendor/github.com/hashicorp/nomad/nomad/mock/job.go index 3422c7e5..f6da3474 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/mock/job.go +++ b/vendor/github.com/hashicorp/nomad/nomad/mock/job.go @@ -122,6 +122,38 @@ func Job() *structs.Job { return job } +// MinJob returns a minimal service job with a mock driver task. +func MinJob() *structs.Job { + job := &structs.Job{ + ID: "j" + uuid.Short(), + Name: "j", + Region: "global", + Type: "service", + TaskGroups: []*structs.TaskGroup{ + { + Name: "g", + Count: 1, + Tasks: []*structs.Task{ + { + Name: "t", + Driver: "mock_driver", + Config: map[string]any{ + // An empty config actually causes an error, so set a reasonably + // long run_for duration. 
+ "run_for": "10m", + }, + LogConfig: structs.DefaultLogConfig(), + }, + }, + }, + }, + Datacenters: []string{"dc1"}, + Priority: 50, + } + job.Canonicalize() + return job +} + func JobWithScalingPolicy() (*structs.Job, *structs.ScalingPolicy) { job := Job() policy := &structs.ScalingPolicy{ @@ -287,6 +319,26 @@ func MultiregionJob() *structs.Job { return job } +func MultiregionMinJob() *structs.Job { + job := MinJob() + update := *structs.DefaultUpdateStrategy + job.Update = update + job.TaskGroups[0].Update = &update + job.Multiregion = &structs.Multiregion{ + Regions: []*structs.MultiregionRegion{ + { + Name: "west", + Count: 1, + }, + { + Name: "east", + Count: 1, + }, + }, + } + return job +} + func BatchJob() *structs.Job { job := &structs.Job{ Region: "global", diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go index fab52060..d4d381c8 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go @@ -2,8 +2,10 @@ package structs import ( "fmt" + "net" "reflect" "sort" + "strconv" "strings" "github.com/hashicorp/nomad/helper/flatmap" @@ -1360,11 +1362,11 @@ func connectGatewayProxyEnvoyBindAddrsDiff(prev, next map[string]*ConsulGatewayB nextMap := make(map[string]string, len(next)) for k, v := range prev { - prevMap[k] = fmt.Sprintf("%s:%d", v.Address, v.Port) + prevMap[k] = net.JoinHostPort(v.Address, strconv.Itoa(v.Port)) } for k, v := range next { - nextMap[k] = fmt.Sprintf("%s:%d", v.Address, v.Port) + nextMap[k] = net.JoinHostPort(v.Address, strconv.Itoa(v.Port)) } oldPrimitiveFlat := flatmap.Flatten(prevMap, nil, false) diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go index b3a11407..dc4e78ca 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go @@ -407,7 +407,7 @@ func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) { // AllocName returns the name of the allocation given the input. func AllocName(job, group string, idx uint) string { - return fmt.Sprintf("%s.%s[%d]", job, group, idx) + return job + "." 
+ group + "[" + strconv.FormatUint(uint64(idx), 10) + "]" } // AllocSuffix returns the alloc index suffix that was added by the AllocName diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go index 254071df..1062bc6c 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go @@ -746,7 +746,11 @@ func getDynamicPortsStochastic(nodeUsed Bitmap, portsInOffer []int, minDynamicPo return nil, fmt.Errorf("stochastic dynamic port selection failed") } - randPort := minDynamicPort + rand.Intn(maxDynamicPort-minDynamicPort) + randPort := minDynamicPort + if maxDynamicPort-minDynamicPort > 0 { + randPort = randPort + rand.Intn(maxDynamicPort-minDynamicPort) + } + if nodeUsed != nil && nodeUsed.Check(uint(randPort)) { goto PICK } diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go index dbf68ef2..ee4d7d8e 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go @@ -4972,6 +4972,12 @@ func (u *UpdateStrategy) IsEmpty() bool { return true } + // When the Job is transformed from api to struct, the Update Strategy block is + // copied into the existing task groups, the only things that are passed along + // are MaxParallel and Stagger, because they are enforced at job level. + // That is why checking if MaxParallel is zero is enough to know if the + // update block is empty. + return u.MaxParallel == 0 } @@ -6540,6 +6546,8 @@ func (tg *TaskGroup) Validate(j *Job) error { mErr.Errors = append(mErr.Errors, outer) } + isTypeService := j.Type == JobTypeService + // Validate the tasks for _, task := range tg.Tasks { // Validate the task does not reference undefined volume mounts @@ -6559,6 +6567,16 @@ func (tg *TaskGroup) Validate(j *Job) error { outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) mErr.Errors = append(mErr.Errors, outer) } + + // Validate the group's Update Strategy does not conflict with the Task's kill_timeout for service type jobs + if isTypeService && tg.Update != nil { + // progress_deadline = 0 has a special meaning so it should not be + // validated against the task's kill_timeout. + if tg.Update.ProgressDeadline > 0 && task.KillTimeout > tg.Update.ProgressDeadline { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %s has a kill timout (%s) longer than the group's progress deadline (%s)", + task.Name, task.KillTimeout.String(), tg.Update.ProgressDeadline.String())) + } + } } return mErr.ErrorOrNil() @@ -9469,6 +9487,14 @@ func (d *Deployment) GoString() string { return base } +// GetNamespace implements the NamespaceGetter interface, required for pagination. +func (d *Deployment) GetNamespace() string { + if d == nil { + return "" + } + return d.Namespace +} + // DeploymentState tracks the state of a deployment for a given task group. type DeploymentState struct { // AutoRevert marks whether the task group has indicated the job should be diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/variables.go b/vendor/github.com/hashicorp/nomad/nomad/structs/variables.go index 8c7260d1..1d693922 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/variables.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/variables.go @@ -190,9 +190,10 @@ func (sv *VariableDecrypted) Canonicalize() { } } -// GetNamespace returns the variable's namespace. 
Used for pagination. +// Copy returns a fully hydrated copy of VariableMetadata that can be +// manipulated while ensuring the original is not touched. func (sv *VariableMetadata) Copy() *VariableMetadata { - var out VariableMetadata = *sv + var out = *sv return &out } diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go b/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go index 0b16f9b3..1671e137 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/volumes.go @@ -198,6 +198,14 @@ func (v *VolumeRequest) Copy() *VolumeRequest { return nv } +func (v *VolumeRequest) VolumeID(tgName string) string { + source := v.Source + if v.PerAlloc { + source = source + AllocSuffix(tgName) + } + return source +} + func CopyMapVolumeRequest(s map[string]*VolumeRequest) map[string]*VolumeRequest { if s == nil { return nil diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go index 180d5a23..04a7ba12 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go @@ -2133,10 +2133,18 @@ type TaskConfig struct { // to use for the task. *Only supported on Linux NetworkIsolationSpec *NetworkIsolationSpec `protobuf:"bytes,16,opt,name=network_isolation_spec,json=networkIsolationSpec,proto3" json:"network_isolation_spec,omitempty"` // DNSConfig is the configuration for task DNS resolvers and other options - Dns *DNSConfig `protobuf:"bytes,17,opt,name=dns,proto3" json:"dns,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Dns *DNSConfig `protobuf:"bytes,17,opt,name=dns,proto3" json:"dns,omitempty"` + // JobId is the ID of the job of which this task is part of + JobId string `protobuf:"bytes,18,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + // Namespace is the namespace of the job of which this task is part of + Namespace string `protobuf:"bytes,19,opt,name=namespace,proto3" json:"namespace,omitempty"` + // NodeName is the name of the node where the associated allocation is running + NodeName string `protobuf:"bytes,20,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // NodeId is the ID of the node where the associated allocation is running + NodeId string `protobuf:"bytes,21,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TaskConfig) Reset() { *m = TaskConfig{} } @@ -2283,6 +2291,34 @@ func (m *TaskConfig) GetDns() *DNSConfig { return nil } +func (m *TaskConfig) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *TaskConfig) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *TaskConfig) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *TaskConfig) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + type Resources struct { // AllocatedResources are the resources set for the task AllocatedResources *AllocatedTaskResources `protobuf:"bytes,1,opt,name=allocated_resources,json=allocatedResources,proto3" json:"allocated_resources,omitempty"` @@ -3692,245 +3728,248 @@ func init() { } var fileDescriptor_4a8f45747846a74d = []byte{ - // 3795 bytes of a 
gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x73, 0xdb, 0x48, - 0x76, 0x17, 0x08, 0x7e, 0x3e, 0x4a, 0x14, 0xd4, 0x96, 0x6d, 0x9a, 0x9b, 0x64, 0xbc, 0x48, 0x4d, - 0x4a, 0xd9, 0x9d, 0xa1, 0x67, 0xb5, 0x95, 0xf1, 0x78, 0xd6, 0xb3, 0x1e, 0x0e, 0x45, 0x5b, 0x1a, - 0x4b, 0x94, 0xd2, 0xa4, 0xca, 0xeb, 0x38, 0x19, 0x04, 0x02, 0xda, 0x14, 0x2c, 0xe2, 0x63, 0xd0, - 0xa0, 0x2c, 0x6d, 0x2a, 0x95, 0xd4, 0xa6, 0x2a, 0x35, 0xa9, 0x4a, 0x2a, 0xb9, 0x4c, 0xf6, 0x92, - 0xd3, 0x56, 0xe5, 0x94, 0x7f, 0x20, 0xb5, 0xa9, 0x3d, 0xe5, 0x90, 0x7f, 0x22, 0x97, 0xdc, 0x72, - 0x4c, 0x4e, 0xb9, 0xa6, 0xfa, 0x03, 0x20, 0x20, 0xd2, 0x63, 0x90, 0xf2, 0x89, 0x78, 0xaf, 0xbb, - 0x7f, 0xfd, 0xf8, 0xde, 0xeb, 0xd7, 0xaf, 0xbb, 0x1f, 0xe8, 0xc1, 0x78, 0x32, 0x72, 0x3c, 0x7a, - 0xcf, 0x0e, 0x9d, 0x73, 0x12, 0xd2, 0x7b, 0x41, 0xe8, 0x47, 0xbe, 0xa4, 0xda, 0x9c, 0x40, 0xef, - 0x9f, 0x9a, 0xf4, 0xd4, 0xb1, 0xfc, 0x30, 0x68, 0x7b, 0xbe, 0x6b, 0xda, 0x6d, 0x39, 0xa6, 0x2d, - 0xc7, 0x88, 0x6e, 0xad, 0xdf, 0x19, 0xf9, 0xfe, 0x68, 0x4c, 0x04, 0xc2, 0xc9, 0xe4, 0xe5, 0x3d, - 0x7b, 0x12, 0x9a, 0x91, 0xe3, 0x7b, 0xb2, 0xfd, 0xbd, 0xab, 0xed, 0x91, 0xe3, 0x12, 0x1a, 0x99, - 0x6e, 0x20, 0x3b, 0xbc, 0x1f, 0xcb, 0x42, 0x4f, 0xcd, 0x90, 0xd8, 0xf7, 0x4e, 0xad, 0x31, 0x0d, - 0x88, 0xc5, 0x7e, 0x0d, 0xf6, 0x21, 0xbb, 0x7d, 0x70, 0xa5, 0x1b, 0x8d, 0xc2, 0x89, 0x15, 0xc5, - 0x92, 0x9b, 0x51, 0x14, 0x3a, 0x27, 0x93, 0x88, 0x88, 0xde, 0xfa, 0x1d, 0xb8, 0x3d, 0x34, 0xe9, - 0x59, 0xd7, 0xf7, 0x5e, 0x3a, 0xa3, 0x81, 0x75, 0x4a, 0x5c, 0x13, 0x93, 0xaf, 0x27, 0x84, 0x46, - 0xfa, 0x1f, 0x43, 0x73, 0xb6, 0x89, 0x06, 0xbe, 0x47, 0x09, 0xfa, 0x1c, 0x8a, 0x6c, 0xca, 0xa6, - 0x72, 0x57, 0xd9, 0xaa, 0x6f, 0x7f, 0xd0, 0x7e, 0x93, 0x0a, 0x84, 0x0c, 0x6d, 0x29, 0x6a, 0x7b, - 0x10, 0x10, 0x0b, 0xf3, 0x91, 0xfa, 0x4d, 0xb8, 0xd1, 0x35, 0x03, 0xf3, 0xc4, 0x19, 0x3b, 0x91, - 0x43, 0x68, 0x3c, 0xe9, 0x04, 0x36, 0xb3, 0x6c, 0x39, 0xe1, 0x9f, 0xc0, 0xaa, 0x95, 0xe2, 0xcb, - 0x89, 0x1f, 0xb4, 0x73, 0xe9, 0xbe, 0xbd, 0xc3, 0xa9, 0x0c, 0x70, 0x06, 0x4e, 0xdf, 0x04, 0xf4, - 0xd8, 0xf1, 0x46, 0x24, 0x0c, 0x42, 0xc7, 0x8b, 0x62, 0x61, 0x7e, 0xa3, 0xc2, 0x8d, 0x0c, 0x5b, - 0x0a, 0xf3, 0x0a, 0x20, 0xd1, 0x23, 0x13, 0x45, 0xdd, 0xaa, 0x6f, 0x7f, 0x99, 0x53, 0x94, 0x39, - 0x78, 0xed, 0x4e, 0x02, 0xd6, 0xf3, 0xa2, 0xf0, 0x12, 0xa7, 0xd0, 0xd1, 0x57, 0x50, 0x3e, 0x25, - 0xe6, 0x38, 0x3a, 0x6d, 0x16, 0xee, 0x2a, 0x5b, 0x8d, 0xed, 0xc7, 0xd7, 0x98, 0x67, 0x97, 0x03, - 0x0d, 0x22, 0x33, 0x22, 0x58, 0xa2, 0xa2, 0x0f, 0x01, 0x89, 0x2f, 0xc3, 0x26, 0xd4, 0x0a, 0x9d, - 0x80, 0xb9, 0x64, 0x53, 0xbd, 0xab, 0x6c, 0xd5, 0xf0, 0x86, 0x68, 0xd9, 0x99, 0x36, 0xb4, 0x02, - 0x58, 0xbf, 0x22, 0x2d, 0xd2, 0x40, 0x3d, 0x23, 0x97, 0xdc, 0x22, 0x35, 0xcc, 0x3e, 0xd1, 0x13, - 0x28, 0x9d, 0x9b, 0xe3, 0x09, 0xe1, 0x22, 0xd7, 0xb7, 0x7f, 0xf4, 0x36, 0xf7, 0x90, 0x2e, 0x3a, - 0xd5, 0x03, 0x16, 0xe3, 0x3f, 0x2d, 0x7c, 0xa2, 0xe8, 0x0f, 0xa0, 0x9e, 0x92, 0x1b, 0x35, 0x00, - 0x8e, 0xfb, 0x3b, 0xbd, 0x61, 0xaf, 0x3b, 0xec, 0xed, 0x68, 0x2b, 0x68, 0x0d, 0x6a, 0xc7, 0xfd, - 0xdd, 0x5e, 0x67, 0x7f, 0xb8, 0xfb, 0x5c, 0x53, 0x50, 0x1d, 0x2a, 0x31, 0x51, 0xd0, 0x2f, 0x00, - 0x61, 0x62, 0xf9, 0xe7, 0x24, 0x64, 0x8e, 0x2c, 0xad, 0x8a, 0x6e, 0x43, 0x25, 0x32, 0xe9, 0x99, - 0xe1, 0xd8, 0x52, 0xe6, 0x32, 0x23, 0xf7, 0x6c, 0xb4, 0x07, 0xe5, 0x53, 0xd3, 0xb3, 0xc7, 0x6f, - 0x97, 0x3b, 0xab, 0x6a, 0x06, 0xbe, 0xcb, 0x07, 0x62, 0x09, 0xc0, 0xbc, 0x3b, 0x33, 0xb3, 0x30, - 0x80, 0xfe, 0x1c, 0xb4, 0x41, 0x64, 0x86, 0x51, 0x5a, 0x9c, 0x1e, 0x14, 0xd9, 0xfc, 0xd2, 
0xa3, - 0x17, 0x99, 0x53, 0xac, 0x4c, 0xcc, 0x87, 0xeb, 0xff, 0x5b, 0x80, 0x8d, 0x14, 0xb6, 0xf4, 0xd4, - 0x67, 0x50, 0x0e, 0x09, 0x9d, 0x8c, 0x23, 0x0e, 0xdf, 0xd8, 0x7e, 0x94, 0x13, 0x7e, 0x06, 0xa9, - 0x8d, 0x39, 0x0c, 0x96, 0x70, 0x68, 0x0b, 0x34, 0x31, 0xc2, 0x20, 0x61, 0xe8, 0x87, 0x86, 0x4b, - 0x47, 0x5c, 0x6b, 0x35, 0xdc, 0x10, 0xfc, 0x1e, 0x63, 0x1f, 0xd0, 0x51, 0x4a, 0xab, 0xea, 0x35, - 0xb5, 0x8a, 0x4c, 0xd0, 0x3c, 0x12, 0xbd, 0xf6, 0xc3, 0x33, 0x83, 0xa9, 0x36, 0x74, 0x6c, 0xd2, - 0x2c, 0x72, 0xd0, 0x8f, 0x73, 0x82, 0xf6, 0xc5, 0xf0, 0x43, 0x39, 0x1a, 0xaf, 0x7b, 0x59, 0x86, - 0xfe, 0x43, 0x28, 0x8b, 0x7f, 0xca, 0x3c, 0x69, 0x70, 0xdc, 0xed, 0xf6, 0x06, 0x03, 0x6d, 0x05, - 0xd5, 0xa0, 0x84, 0x7b, 0x43, 0xcc, 0x3c, 0xac, 0x06, 0xa5, 0xc7, 0x9d, 0x61, 0x67, 0x5f, 0x2b, - 0xe8, 0x3f, 0x80, 0xf5, 0x67, 0xa6, 0x13, 0xe5, 0x71, 0x2e, 0xdd, 0x07, 0x6d, 0xda, 0x57, 0x5a, - 0x67, 0x2f, 0x63, 0x9d, 0xfc, 0xaa, 0xe9, 0x5d, 0x38, 0xd1, 0x15, 0x7b, 0x68, 0xa0, 0x92, 0x30, - 0x94, 0x26, 0x60, 0x9f, 0xfa, 0x6b, 0x58, 0x1f, 0x44, 0x7e, 0x90, 0xcb, 0xf3, 0x7f, 0x0c, 0x15, - 0xb6, 0xdb, 0xf8, 0x93, 0x48, 0xba, 0xfe, 0x9d, 0xb6, 0xd8, 0x8d, 0xda, 0xf1, 0x6e, 0xd4, 0xde, - 0x91, 0xbb, 0x15, 0x8e, 0x7b, 0xa2, 0x5b, 0x50, 0xa6, 0xce, 0xc8, 0x33, 0xc7, 0x32, 0x5a, 0x48, - 0x4a, 0x47, 0xcc, 0xc9, 0xe3, 0x89, 0xa5, 0xe3, 0x77, 0x01, 0xed, 0x10, 0x1a, 0x85, 0xfe, 0x65, - 0x2e, 0x79, 0x36, 0xa1, 0xf4, 0xd2, 0x0f, 0x2d, 0xb1, 0x10, 0xab, 0x58, 0x10, 0x6c, 0x51, 0x65, - 0x40, 0x24, 0xf6, 0x87, 0x80, 0xf6, 0x3c, 0xb6, 0xa7, 0xe4, 0x33, 0xc4, 0x3f, 0x14, 0xe0, 0x46, - 0xa6, 0xbf, 0x34, 0xc6, 0xf2, 0xeb, 0x90, 0x05, 0xa6, 0x09, 0x15, 0xeb, 0x10, 0x1d, 0x42, 0x59, - 0xf4, 0x90, 0x9a, 0xbc, 0xbf, 0x00, 0x90, 0xd8, 0xa6, 0x24, 0x9c, 0x84, 0x99, 0xeb, 0xf4, 0xea, - 0xbb, 0x75, 0xfa, 0xd7, 0xa0, 0xc5, 0xff, 0x83, 0xbe, 0xd5, 0x36, 0x5f, 0xc2, 0x0d, 0xcb, 0x1f, - 0x8f, 0x89, 0xc5, 0xbc, 0xc1, 0x70, 0xbc, 0x88, 0x84, 0xe7, 0xe6, 0xf8, 0xed, 0x7e, 0x83, 0xa6, - 0xa3, 0xf6, 0xe4, 0x20, 0xfd, 0x05, 0x6c, 0xa4, 0x26, 0x96, 0x86, 0x78, 0x0c, 0x25, 0xca, 0x18, - 0xd2, 0x12, 0x1f, 0x2d, 0x68, 0x09, 0x8a, 0xc5, 0x70, 0xfd, 0x86, 0x00, 0xef, 0x9d, 0x13, 0x2f, - 0xf9, 0x5b, 0xfa, 0x0e, 0x6c, 0x0c, 0xb8, 0x9b, 0xe6, 0xf2, 0xc3, 0xa9, 0x8b, 0x17, 0x32, 0x2e, - 0xbe, 0x09, 0x28, 0x8d, 0x22, 0x1d, 0xf1, 0x12, 0xd6, 0x7b, 0x17, 0xc4, 0xca, 0x85, 0xdc, 0x84, - 0x8a, 0xe5, 0xbb, 0xae, 0xe9, 0xd9, 0xcd, 0xc2, 0x5d, 0x75, 0xab, 0x86, 0x63, 0x32, 0xbd, 0x16, - 0xd5, 0xbc, 0x6b, 0x51, 0xff, 0x3b, 0x05, 0xb4, 0xe9, 0xdc, 0x52, 0x91, 0x4c, 0xfa, 0xc8, 0x66, - 0x40, 0x6c, 0xee, 0x55, 0x2c, 0x29, 0xc9, 0x8f, 0xc3, 0x85, 0xe0, 0x93, 0x30, 0x4c, 0x85, 0x23, - 0xf5, 0x9a, 0xe1, 0x48, 0xdf, 0x85, 0xdf, 0x8a, 0xc5, 0x19, 0x44, 0x21, 0x31, 0x5d, 0xc7, 0x1b, - 0xed, 0x1d, 0x1e, 0x06, 0x44, 0x08, 0x8e, 0x10, 0x14, 0x6d, 0x33, 0x32, 0xa5, 0x60, 0xfc, 0x9b, - 0x2d, 0x7a, 0x6b, 0xec, 0xd3, 0x64, 0xd1, 0x73, 0x42, 0xff, 0x0f, 0x15, 0x9a, 0x33, 0x50, 0xb1, - 0x7a, 0x5f, 0x40, 0x89, 0x92, 0x68, 0x12, 0x48, 0x57, 0xe9, 0xe5, 0x16, 0x78, 0x3e, 0x5e, 0x7b, - 0xc0, 0xc0, 0xb0, 0xc0, 0x44, 0x23, 0xa8, 0x46, 0xd1, 0xa5, 0x41, 0x9d, 0x9f, 0xc7, 0x09, 0xc1, - 0xfe, 0x75, 0xf1, 0x87, 0x24, 0x74, 0x1d, 0xcf, 0x1c, 0x0f, 0x9c, 0x9f, 0x13, 0x5c, 0x89, 0xa2, - 0x4b, 0xf6, 0x81, 0x9e, 0x33, 0x87, 0xb7, 0x1d, 0x4f, 0xaa, 0xbd, 0xbb, 0xec, 0x2c, 0x29, 0x05, - 0x63, 0x81, 0xd8, 0xda, 0x87, 0x12, 0xff, 0x4f, 0xcb, 0x38, 0xa2, 0x06, 0x6a, 0x14, 0x5d, 0x72, - 0xa1, 0xaa, 0x98, 0x7d, 0xb6, 0x1e, 0xc2, 0x6a, 0xfa, 0x1f, 0x30, 0x47, 0x3a, 0x25, 0xce, 0xe8, - 0x54, 0x38, 0x58, 
0x09, 0x4b, 0x8a, 0x59, 0xf2, 0xb5, 0x63, 0xcb, 0x94, 0xb5, 0x84, 0x05, 0xa1, - 0xff, 0x6b, 0x01, 0xee, 0xcc, 0xd1, 0x8c, 0x74, 0xd6, 0x17, 0x19, 0x67, 0x7d, 0x47, 0x5a, 0x88, - 0x3d, 0xfe, 0x45, 0xc6, 0xe3, 0xdf, 0x21, 0x38, 0x5b, 0x36, 0xb7, 0xa0, 0x4c, 0x2e, 0x9c, 0x88, - 0xd8, 0x52, 0x55, 0x92, 0x4a, 0x2d, 0xa7, 0xe2, 0x75, 0x97, 0xd3, 0x01, 0x6c, 0x76, 0x43, 0x62, - 0x46, 0x44, 0x86, 0xf2, 0xd8, 0xff, 0xef, 0x40, 0xd5, 0x1c, 0x8f, 0x7d, 0x6b, 0x6a, 0xd6, 0x0a, - 0xa7, 0xf7, 0x6c, 0xd4, 0x82, 0xea, 0xa9, 0x4f, 0x23, 0xcf, 0x74, 0x89, 0x0c, 0x5e, 0x09, 0xad, - 0x7f, 0xab, 0xc0, 0xcd, 0x2b, 0x78, 0xd2, 0x0a, 0x27, 0xd0, 0x70, 0xa8, 0x3f, 0xe6, 0x7f, 0xd0, - 0x48, 0x9d, 0xf0, 0x7e, 0xb2, 0xd8, 0x56, 0xb3, 0x17, 0x63, 0xf0, 0x03, 0xdf, 0x9a, 0x93, 0x26, - 0xb9, 0xc7, 0xf1, 0xc9, 0x6d, 0xb9, 0xd2, 0x63, 0x52, 0xff, 0x47, 0x05, 0x6e, 0xca, 0x1d, 0x3e, - 0xff, 0x1f, 0x9d, 0x15, 0xb9, 0xf0, 0xae, 0x45, 0xd6, 0x9b, 0x70, 0xeb, 0xaa, 0x5c, 0x32, 0xe6, - 0xff, 0x5f, 0x11, 0xd0, 0xec, 0xe9, 0x12, 0x7d, 0x1f, 0x56, 0x29, 0xf1, 0x6c, 0x43, 0xec, 0x17, - 0x62, 0x2b, 0xab, 0xe2, 0x3a, 0xe3, 0x89, 0x8d, 0x83, 0xb2, 0x10, 0x48, 0x2e, 0xa4, 0xb4, 0x55, - 0xcc, 0xbf, 0xd1, 0x29, 0xac, 0xbe, 0xa4, 0x46, 0x32, 0x37, 0x77, 0xa8, 0x46, 0xee, 0xb0, 0x36, - 0x2b, 0x47, 0xfb, 0xf1, 0x20, 0xf9, 0x5f, 0xb8, 0xfe, 0x92, 0x26, 0x04, 0xfa, 0x46, 0x81, 0xdb, - 0x71, 0x5a, 0x31, 0x55, 0x9f, 0xeb, 0xdb, 0x84, 0x36, 0x8b, 0x77, 0xd5, 0xad, 0xc6, 0xf6, 0xd1, - 0x35, 0xf4, 0x37, 0xc3, 0x3c, 0xf0, 0x6d, 0x82, 0x6f, 0x7a, 0x73, 0xb8, 0x14, 0xb5, 0xe1, 0x86, - 0x3b, 0xa1, 0x91, 0x21, 0xbc, 0xc0, 0x90, 0x9d, 0x9a, 0x25, 0xae, 0x97, 0x0d, 0xd6, 0x94, 0xf1, - 0x55, 0x74, 0x06, 0x6b, 0xae, 0x3f, 0xf1, 0x22, 0xc3, 0xe2, 0xe7, 0x1f, 0xda, 0x2c, 0x2f, 0x74, - 0x30, 0x9e, 0xa3, 0xa5, 0x03, 0x06, 0x27, 0x4e, 0x53, 0x14, 0xaf, 0xba, 0x29, 0x8a, 0x19, 0x32, - 0x24, 0xae, 0x1f, 0x11, 0x83, 0xc5, 0x4b, 0xda, 0xac, 0x08, 0x43, 0x0a, 0x1e, 0x0b, 0x0d, 0x54, - 0x6f, 0x43, 0x3d, 0xa5, 0x66, 0x54, 0x85, 0x62, 0xff, 0xb0, 0xdf, 0xd3, 0x56, 0x10, 0x40, 0xb9, - 0xbb, 0x8b, 0x0f, 0x0f, 0x87, 0xe2, 0xd4, 0xb0, 0x77, 0xd0, 0x79, 0xd2, 0xd3, 0x0a, 0x7a, 0x0f, - 0x56, 0xd3, 0x13, 0x22, 0x04, 0x8d, 0xe3, 0xfe, 0xd3, 0xfe, 0xe1, 0xb3, 0xbe, 0x71, 0x70, 0x78, - 0xdc, 0x1f, 0xb2, 0xf3, 0x46, 0x03, 0xa0, 0xd3, 0x7f, 0x3e, 0xa5, 0xd7, 0xa0, 0xd6, 0x3f, 0x8c, - 0x49, 0xa5, 0x55, 0xd0, 0x14, 0xfd, 0xdf, 0x55, 0xd8, 0x9c, 0xa7, 0x7b, 0x64, 0x43, 0x91, 0xd9, - 0x51, 0x9e, 0xf8, 0xde, 0xbd, 0x19, 0x39, 0x3a, 0x73, 0xdf, 0xc0, 0x94, 0x21, 0xbe, 0x86, 0xf9, - 0x37, 0x32, 0xa0, 0x3c, 0x36, 0x4f, 0xc8, 0x98, 0x36, 0x55, 0x7e, 0x27, 0xf2, 0xe4, 0x3a, 0x73, - 0xef, 0x73, 0x24, 0x71, 0x21, 0x22, 0x61, 0xd1, 0x10, 0xea, 0x2c, 0x88, 0x51, 0xa1, 0x3a, 0x19, - 0x57, 0xb7, 0x73, 0xce, 0xb2, 0x3b, 0x1d, 0x89, 0xd3, 0x30, 0xad, 0x07, 0x50, 0x4f, 0x4d, 0x36, - 0xe7, 0x3e, 0x63, 0x33, 0x7d, 0x9f, 0x51, 0x4b, 0x5f, 0x4e, 0x3c, 0x9a, 0xb5, 0x01, 0xd3, 0x11, - 0x73, 0x82, 0xdd, 0xc3, 0xc1, 0x50, 0x9c, 0x1c, 0x9f, 0xe0, 0xc3, 0xe3, 0x23, 0x4d, 0x61, 0xcc, - 0x61, 0x67, 0xf0, 0x54, 0x2b, 0x24, 0x3e, 0xa2, 0xea, 0x5d, 0xa8, 0xa7, 0xe4, 0xca, 0x44, 0x6d, - 0x25, 0x1b, 0xb5, 0x59, 0xdc, 0x34, 0x6d, 0x3b, 0x24, 0x94, 0x4a, 0x39, 0x62, 0x52, 0x7f, 0x01, - 0xb5, 0x9d, 0xfe, 0x40, 0x42, 0x34, 0xa1, 0x42, 0x49, 0xc8, 0xfe, 0x37, 0xbf, 0x99, 0xaa, 0xe1, - 0x98, 0x64, 0xe0, 0x94, 0x98, 0xa1, 0x75, 0x4a, 0xa8, 0xdc, 0xeb, 0x13, 0x9a, 0x8d, 0xf2, 0xf9, - 0x0d, 0x8f, 0xb0, 0x5d, 0x0d, 0xc7, 0xa4, 0xfe, 0x3f, 0x15, 0x80, 0xe9, 0x6d, 0x03, 0x6a, 0x40, - 0x21, 0x89, 0xc1, 0x05, 0xc7, 0x66, 0x7e, 
0x90, 0xda, 0x63, 0xf8, 0x37, 0xda, 0x86, 0x9b, 0x2e, - 0x1d, 0x05, 0xa6, 0x75, 0x66, 0xc8, 0x4b, 0x02, 0xb1, 0x54, 0x79, 0x3c, 0x5b, 0xc5, 0x37, 0x64, - 0xa3, 0x5c, 0x89, 0x02, 0x77, 0x1f, 0x54, 0xe2, 0x9d, 0xf3, 0xd8, 0x53, 0xdf, 0xfe, 0x74, 0xe1, - 0x5b, 0x90, 0x76, 0xcf, 0x3b, 0x17, 0xbe, 0xc2, 0x60, 0x90, 0x01, 0x60, 0x93, 0x73, 0xc7, 0x22, - 0x06, 0x03, 0x2d, 0x71, 0xd0, 0xcf, 0x17, 0x07, 0xdd, 0xe1, 0x18, 0x09, 0x74, 0xcd, 0x8e, 0x69, - 0xd4, 0x87, 0x5a, 0x48, 0xa8, 0x3f, 0x09, 0x2d, 0x22, 0x02, 0x50, 0xfe, 0x83, 0x0a, 0x8e, 0xc7, - 0xe1, 0x29, 0x04, 0xda, 0x81, 0x32, 0x8f, 0x3b, 0x2c, 0xc2, 0xa8, 0xdf, 0x79, 0xa5, 0x9a, 0x05, - 0xe3, 0x91, 0x04, 0xcb, 0xb1, 0xe8, 0x09, 0x54, 0x84, 0x88, 0xb4, 0x59, 0xe5, 0x30, 0x1f, 0xe6, - 0x0d, 0x8a, 0x7c, 0x14, 0x8e, 0x47, 0x33, 0xab, 0x4e, 0x28, 0x09, 0x9b, 0x35, 0x61, 0x55, 0xf6, - 0x8d, 0xbe, 0x07, 0x35, 0xb1, 0x07, 0xdb, 0x4e, 0xd8, 0x04, 0xe1, 0x9c, 0x9c, 0xb1, 0xe3, 0x84, - 0xe8, 0x3d, 0xa8, 0x8b, 0x5c, 0xcb, 0xe0, 0x51, 0xa1, 0xce, 0x9b, 0x41, 0xb0, 0x8e, 0x58, 0x6c, - 0x10, 0x1d, 0x48, 0x18, 0x8a, 0x0e, 0xab, 0x49, 0x07, 0x12, 0x86, 0xbc, 0xc3, 0xef, 0xc1, 0x3a, - 0xcf, 0x50, 0x47, 0xa1, 0x3f, 0x09, 0x0c, 0xee, 0x53, 0x6b, 0xbc, 0xd3, 0x1a, 0x63, 0x3f, 0x61, - 0xdc, 0x3e, 0x73, 0xae, 0x3b, 0x50, 0x7d, 0xe5, 0x9f, 0x88, 0x0e, 0x0d, 0xb1, 0x0e, 0x5e, 0xf9, - 0x27, 0x71, 0x53, 0x92, 0x25, 0xac, 0x67, 0xb3, 0x84, 0xaf, 0xe1, 0xd6, 0xec, 0x76, 0xc7, 0xb3, - 0x05, 0xed, 0xfa, 0xd9, 0xc2, 0xa6, 0x37, 0x2f, 0x0e, 0x7f, 0x01, 0xaa, 0xed, 0xd1, 0xe6, 0xc6, - 0x42, 0xce, 0x91, 0xac, 0x63, 0xcc, 0x06, 0xb7, 0x3e, 0x86, 0x6a, 0xec, 0x7d, 0x8b, 0xc4, 0xa5, - 0xd6, 0x43, 0x68, 0x64, 0x7d, 0x77, 0xa1, 0xa8, 0xf6, 0xcf, 0x05, 0xa8, 0x25, 0x5e, 0x8a, 0x3c, - 0xb8, 0xc1, 0xb5, 0xc8, 0x52, 0x34, 0x63, 0xea, 0xf4, 0x22, 0x31, 0xfc, 0x2c, 0xe7, 0xff, 0xea, - 0xc4, 0x08, 0xf2, 0x84, 0x2a, 0x57, 0x00, 0x4a, 0x90, 0xa7, 0xf3, 0x7d, 0x05, 0xeb, 0x63, 0xc7, - 0x9b, 0x5c, 0xa4, 0xe6, 0x12, 0x19, 0xdd, 0x1f, 0xe4, 0x9c, 0x6b, 0x9f, 0x8d, 0x9e, 0xce, 0xd1, - 0x18, 0x67, 0x68, 0xb4, 0x0b, 0xa5, 0xc0, 0x0f, 0xa3, 0x78, 0x93, 0xca, 0xbb, 0x7d, 0x1c, 0xf9, - 0x61, 0x74, 0x60, 0x06, 0x01, 0x3b, 0xb4, 0x08, 0x00, 0xfd, 0xdb, 0x02, 0xdc, 0x9a, 0xff, 0xc7, - 0x50, 0x1f, 0x54, 0x2b, 0x98, 0x48, 0x25, 0x3d, 0x5c, 0x54, 0x49, 0xdd, 0x60, 0x32, 0x95, 0x9f, - 0x01, 0xa1, 0x67, 0x50, 0x76, 0x89, 0xeb, 0x87, 0x97, 0x52, 0x17, 0x8f, 0x16, 0x85, 0x3c, 0xe0, - 0xa3, 0xa7, 0xa8, 0x12, 0x0e, 0x61, 0xa8, 0x4a, 0xef, 0xa5, 0x32, 0x4e, 0x2e, 0x78, 0xad, 0x14, - 0x43, 0xe2, 0x04, 0x47, 0xff, 0x18, 0x6e, 0xce, 0xfd, 0x2b, 0xe8, 0xb7, 0x01, 0xac, 0x60, 0x62, - 0xf0, 0x6b, 0x7f, 0xe1, 0x41, 0x2a, 0xae, 0x59, 0xc1, 0x64, 0xc0, 0x19, 0xfa, 0x0b, 0x68, 0xbe, - 0x49, 0x5e, 0x16, 0x7d, 0x84, 0xc4, 0x86, 0x7b, 0xc2, 0x75, 0xa0, 0xe2, 0xaa, 0x60, 0x1c, 0x9c, - 0x20, 0x1d, 0xd6, 0xe2, 0x46, 0xf3, 0x82, 0x75, 0x50, 0x79, 0x87, 0xba, 0xec, 0x60, 0x5e, 0x1c, - 0x9c, 0xe8, 0xbf, 0x2c, 0xc0, 0xfa, 0x15, 0x91, 0xd9, 0xd1, 0x4d, 0x44, 0xbc, 0xf8, 0x50, 0x2c, - 0x28, 0x16, 0xfe, 0x2c, 0xc7, 0x8e, 0xaf, 0x53, 0xf9, 0x37, 0xdf, 0xf8, 0x02, 0x79, 0xd5, 0x59, - 0x70, 0x02, 0xb6, 0x7c, 0xdc, 0x13, 0x27, 0xa2, 0x3c, 0x0b, 0x29, 0x61, 0x41, 0xa0, 0xe7, 0xd0, - 0x08, 0x09, 0xdf, 0x70, 0x6d, 0x43, 0x78, 0x59, 0x69, 0x21, 0x2f, 0x93, 0x12, 0x32, 0x67, 0xc3, - 0x6b, 0x31, 0x12, 0xa3, 0x28, 0x7a, 0x06, 0x6b, 0xf6, 0xa5, 0x67, 0xba, 0x8e, 0x25, 0x91, 0xcb, - 0x4b, 0x23, 0xaf, 0x4a, 0x20, 0x0e, 0xac, 0x3f, 0x80, 0x7a, 0xaa, 0x91, 0xfd, 0x31, 0x9e, 0x6e, - 0x49, 0x9d, 0x08, 0x22, 0x1b, 0x2d, 0x4a, 0x32, 0x5a, 0xe8, 0x27, 
0x50, 0x4f, 0xad, 0x8b, 0x45, - 0x86, 0x32, 0x7d, 0x46, 0x3e, 0xd7, 0x67, 0x09, 0x17, 0x22, 0x1f, 0xdd, 0x86, 0x0a, 0x4b, 0x75, - 0x0c, 0x27, 0xe0, 0x1a, 0xad, 0xe1, 0x32, 0x23, 0xf7, 0x02, 0xfd, 0xd7, 0x05, 0x68, 0x64, 0x97, - 0x74, 0xec, 0x47, 0x01, 0x09, 0x1d, 0xdf, 0x4e, 0xf9, 0xd1, 0x11, 0x67, 0x30, 0x5f, 0x61, 0xcd, - 0x5f, 0x4f, 0xfc, 0xc8, 0x8c, 0x7d, 0xc5, 0x0a, 0x26, 0x7f, 0xc8, 0xe8, 0x2b, 0x3e, 0xa8, 0x5e, - 0xf1, 0x41, 0xf4, 0x01, 0x20, 0xe9, 0x4a, 0x63, 0xc7, 0x75, 0x22, 0xe3, 0xe4, 0x32, 0x22, 0xc2, - 0xc6, 0x2a, 0xd6, 0x44, 0xcb, 0x3e, 0x6b, 0xf8, 0x82, 0xf1, 0x99, 0xe3, 0xf9, 0xbe, 0x6b, 0x50, - 0xcb, 0x0f, 0x89, 0x61, 0xda, 0xaf, 0xf8, 0xa9, 0x45, 0xc5, 0x75, 0xdf, 0x77, 0x07, 0x8c, 0xd7, - 0xb1, 0x5f, 0xb1, 0x9d, 0xcf, 0x0a, 0x26, 0x94, 0x44, 0x06, 0xfb, 0xe1, 0xc9, 0x42, 0x0d, 0x83, - 0x60, 0x75, 0x83, 0x09, 0x45, 0xbf, 0x0b, 0x6b, 0x71, 0x07, 0xbe, 0xf9, 0xc9, 0x5d, 0x77, 0x55, - 0x76, 0xe1, 0x3c, 0xa4, 0xc3, 0xea, 0x11, 0x09, 0x2d, 0xe2, 0x45, 0x43, 0xc7, 0x3a, 0x63, 0xfb, - 0xbb, 0xb2, 0xa5, 0xe0, 0x0c, 0xef, 0xcb, 0x62, 0xb5, 0xa2, 0x55, 0x71, 0x3c, 0x9b, 0x4b, 0x5c, - 0xaa, 0x7f, 0xa3, 0x40, 0x89, 0xe7, 0x08, 0x4c, 0x29, 0x7c, 0x7f, 0xe5, 0xdb, 0xaf, 0xcc, 0x2d, - 0x19, 0x83, 0x6f, 0xbe, 0xdf, 0x83, 0x1a, 0x57, 0x7e, 0x2a, 0xa5, 0xe7, 0x89, 0x27, 0x6f, 0x6c, - 0x41, 0x35, 0x24, 0xa6, 0xed, 0x7b, 0xe3, 0xf8, 0x36, 0x28, 0xa1, 0xd1, 0xef, 0x83, 0x16, 0x84, - 0x7e, 0x60, 0x8e, 0xa6, 0x07, 0x48, 0x69, 0xbe, 0xf5, 0x14, 0x9f, 0xe5, 0xc4, 0xfa, 0xd7, 0x50, - 0x16, 0x7b, 0xd2, 0x35, 0x44, 0xf9, 0x10, 0x90, 0xd0, 0x11, 0xb3, 0xbd, 0xeb, 0x50, 0x2a, 0x33, - 0x56, 0xfe, 0x5a, 0x29, 0x5a, 0x8e, 0xa6, 0x0d, 0xfa, 0x7f, 0x2a, 0x22, 0x77, 0x15, 0xef, 0x48, - 0x2c, 0xc9, 0x65, 0x0b, 0x82, 0x9d, 0xac, 0xc5, 0x85, 0x55, 0x4c, 0xa2, 0x3d, 0x28, 0xcb, 0x14, - 0xb5, 0xb0, 0xec, 0x33, 0x9c, 0x04, 0x88, 0xaf, 0xaf, 0x89, 0x3c, 0xbc, 0x2f, 0x7a, 0x7d, 0x4d, - 0xc4, 0xf5, 0x35, 0x61, 0x27, 0x4f, 0x99, 0x3c, 0x0b, 0xb8, 0x22, 0xcf, 0x9d, 0xeb, 0x76, 0xf2, - 0x46, 0x40, 0xf4, 0xff, 0x56, 0x92, 0x90, 0x16, 0xdf, 0xe5, 0xa3, 0xaf, 0xa0, 0xca, 0xa2, 0x83, - 0xe1, 0x9a, 0x81, 0x7c, 0x99, 0xee, 0x2e, 0xf7, 0x4c, 0x10, 0x6f, 0x78, 0x22, 0xf5, 0xad, 0x04, - 0x82, 0x62, 0xa1, 0x91, 0x1d, 0x3b, 0xe2, 0xd0, 0xc8, 0xbe, 0xd1, 0xfb, 0xd0, 0x30, 0x27, 0x91, - 0x6f, 0x98, 0xf6, 0x39, 0x09, 0x23, 0x87, 0x12, 0xe9, 0x26, 0x6b, 0x8c, 0xdb, 0x89, 0x99, 0xad, - 0x4f, 0x61, 0x35, 0x8d, 0xf9, 0xb6, 0x94, 0xa4, 0x94, 0x4e, 0x49, 0xfe, 0x14, 0x60, 0x7a, 0x2f, - 0xc6, 0x7c, 0x84, 0x5c, 0x38, 0x91, 0x61, 0xc5, 0xe7, 0xdc, 0x12, 0xae, 0x32, 0x46, 0x97, 0x9d, - 0xbd, 0xb2, 0x97, 0xf6, 0xa5, 0xf8, 0xd2, 0x9e, 0x2d, 0x7c, 0xb6, 0x56, 0xcf, 0x9c, 0xf1, 0x38, - 0xb9, 0xab, 0xab, 0xf9, 0xbe, 0xfb, 0x94, 0x33, 0xf4, 0xdf, 0x14, 0x84, 0xaf, 0x88, 0xe7, 0x97, - 0x5c, 0xe7, 0x9c, 0x77, 0x65, 0xea, 0x07, 0x00, 0x34, 0x32, 0x43, 0x96, 0x5f, 0x99, 0xf1, 0x6d, - 0x61, 0x6b, 0xe6, 0xd6, 0x7f, 0x18, 0xd7, 0x83, 0xe0, 0x9a, 0xec, 0xdd, 0x89, 0xd0, 0x67, 0xb0, - 0x6a, 0xf9, 0x6e, 0x30, 0x26, 0x72, 0x70, 0xe9, 0xad, 0x83, 0xeb, 0x49, 0xff, 0x4e, 0x94, 0xba, - 0xa3, 0x2c, 0x5f, 0xf7, 0x8e, 0xf2, 0xd7, 0x8a, 0x78, 0x45, 0x4a, 0x3f, 0x62, 0xa1, 0xd1, 0x9c, - 0x4a, 0x89, 0x27, 0x4b, 0xbe, 0x88, 0x7d, 0x57, 0x99, 0x44, 0xeb, 0xb3, 0x3c, 0x75, 0x09, 0x6f, - 0xce, 0x78, 0xff, 0x4d, 0x85, 0x5a, 0xf2, 0x80, 0x34, 0x63, 0xfb, 0x4f, 0xa0, 0x96, 0x14, 0xe3, - 0xc8, 0x00, 0xf1, 0x9d, 0xe6, 0x49, 0x3a, 0xa3, 0x97, 0x80, 0xcc, 0xd1, 0x28, 0xc9, 0x64, 0x8d, - 0x09, 0x35, 0x47, 0xf1, 0xf3, 0xdd, 0x27, 0x0b, 0xe8, 0x21, 0xde, 0xfa, 0x8e, 0xd9, 0x78, 
0xac, - 0x99, 0xa3, 0x51, 0x86, 0x83, 0xfe, 0x0c, 0x6e, 0x66, 0xe7, 0x30, 0x4e, 0x2e, 0x8d, 0xc0, 0xb1, - 0xe5, 0x79, 0x7a, 0x77, 0xd1, 0x37, 0xb4, 0x76, 0x06, 0xfe, 0x8b, 0xcb, 0x23, 0xc7, 0x16, 0x3a, - 0x47, 0xe1, 0x4c, 0x43, 0xeb, 0x2f, 0xe0, 0xf6, 0x1b, 0xba, 0xcf, 0xb1, 0x41, 0x3f, 0x5b, 0x1b, - 0xb2, 0xbc, 0x12, 0x52, 0xd6, 0xfb, 0x95, 0x22, 0x9e, 0xfa, 0xb2, 0x3a, 0xe9, 0xa4, 0x53, 0xf0, - 0x7b, 0x39, 0xe7, 0xe9, 0x1e, 0x1d, 0x0b, 0x78, 0x9e, 0x75, 0x7f, 0x79, 0x25, 0xeb, 0xce, 0x9b, - 0x6b, 0x89, 0xe4, 0x55, 0x00, 0x49, 0x04, 0xfd, 0x5f, 0x54, 0xa8, 0xc6, 0xe8, 0xfc, 0x34, 0x7c, - 0x49, 0x23, 0xe2, 0x1a, 0xc9, 0x55, 0x9d, 0x82, 0x41, 0xb0, 0xf8, 0x05, 0xd2, 0xf7, 0xa0, 0xc6, - 0x0e, 0xdd, 0xa2, 0xb9, 0xc0, 0x9b, 0xab, 0x8c, 0xc1, 0x1b, 0xdf, 0x83, 0x7a, 0xe4, 0x47, 0xe6, - 0xd8, 0x88, 0x78, 0x2a, 0xa0, 0x8a, 0xd1, 0x9c, 0xc5, 0x13, 0x01, 0xf4, 0x43, 0xd8, 0x88, 0x4e, - 0x43, 0x3f, 0x8a, 0xc6, 0x2c, 0x0d, 0xe5, 0x49, 0x91, 0xc8, 0x61, 0x8a, 0x58, 0x4b, 0x1a, 0x44, - 0xb2, 0x44, 0x59, 0xf4, 0x9e, 0x76, 0x66, 0xae, 0xcb, 0x83, 0x48, 0x11, 0xaf, 0x25, 0x5c, 0xe6, - 0xda, 0x6c, 0xf3, 0x0c, 0x44, 0xb2, 0xc1, 0x63, 0x85, 0x82, 0x63, 0x12, 0x19, 0xb0, 0xee, 0x12, - 0x93, 0x4e, 0x42, 0x62, 0x1b, 0x2f, 0x1d, 0x32, 0xb6, 0xc5, 0x25, 0x46, 0x23, 0xf7, 0x49, 0x22, - 0x56, 0x4b, 0xfb, 0x31, 0x1f, 0x8d, 0x1b, 0x31, 0x9c, 0xa0, 0x59, 0xe6, 0x20, 0xbe, 0xd0, 0x3a, - 0xd4, 0x07, 0xcf, 0x07, 0xc3, 0xde, 0x81, 0x71, 0x70, 0xb8, 0xd3, 0x93, 0xe5, 0x3f, 0x83, 0x1e, - 0x16, 0xa4, 0xc2, 0xda, 0x87, 0x87, 0xc3, 0xce, 0xbe, 0x31, 0xdc, 0xeb, 0x3e, 0x1d, 0x68, 0x05, - 0x74, 0x13, 0x36, 0x86, 0xbb, 0xf8, 0x70, 0x38, 0xdc, 0xef, 0xed, 0x18, 0x47, 0x3d, 0xbc, 0x77, - 0xb8, 0x33, 0xd0, 0x54, 0x84, 0xa0, 0x31, 0x65, 0x0f, 0xf7, 0x0e, 0x7a, 0x5a, 0x11, 0xd5, 0xa1, - 0x72, 0xd4, 0xc3, 0xdd, 0x5e, 0x7f, 0xa8, 0x95, 0xf4, 0x5f, 0xaa, 0x50, 0x4f, 0x59, 0x91, 0x39, - 0x72, 0x48, 0xc5, 0x91, 0xa5, 0x88, 0xd9, 0x27, 0x7f, 0xae, 0x34, 0xad, 0x53, 0x61, 0x9d, 0x22, - 0x16, 0x04, 0x3f, 0xa6, 0x98, 0x17, 0xa9, 0x75, 0x5e, 0xc4, 0x55, 0xd7, 0xbc, 0x10, 0x20, 0xdf, - 0x87, 0xd5, 0x33, 0x12, 0x7a, 0x64, 0x2c, 0xdb, 0x85, 0x45, 0xea, 0x82, 0x27, 0xba, 0x6c, 0x81, - 0x26, 0xbb, 0x4c, 0x61, 0x84, 0x39, 0x1a, 0x82, 0x7f, 0x10, 0x83, 0x6d, 0x42, 0x49, 0x34, 0x57, - 0xc4, 0xfc, 0x9c, 0x60, 0xdb, 0x14, 0x7d, 0x6d, 0x06, 0x3c, 0x3d, 0x2c, 0x62, 0xfe, 0x8d, 0x4e, - 0x66, 0xed, 0x53, 0xe6, 0xf6, 0x79, 0xb0, 0xb8, 0x3b, 0xbf, 0xc9, 0x44, 0xa7, 0x89, 0x89, 0x2a, - 0xa0, 0xe2, 0xb8, 0x66, 0xa6, 0xdb, 0xe9, 0xee, 0x32, 0xb3, 0xac, 0x41, 0xed, 0xa0, 0xf3, 0x33, - 0xe3, 0x78, 0xc0, 0x6f, 0xc0, 0x91, 0x06, 0xab, 0x4f, 0x7b, 0xb8, 0xdf, 0xdb, 0x97, 0x1c, 0x15, - 0x6d, 0x82, 0x26, 0x39, 0xd3, 0x7e, 0x45, 0x86, 0x20, 0x3e, 0x4b, 0xa8, 0x0a, 0xc5, 0xc1, 0xb3, - 0xce, 0x91, 0x56, 0xd6, 0xff, 0xab, 0x00, 0xeb, 0x62, 0x5b, 0x48, 0x5e, 0xf7, 0xdf, 0xfc, 0xba, - 0x99, 0xbe, 0x11, 0x2a, 0x64, 0x6f, 0x84, 0xe2, 0x24, 0x94, 0xef, 0xea, 0xea, 0x34, 0x09, 0xe5, - 0x37, 0x49, 0x99, 0x88, 0x5f, 0x5c, 0x24, 0xe2, 0x37, 0xa1, 0xe2, 0x12, 0x9a, 0xd8, 0xad, 0x86, - 0x63, 0x12, 0x39, 0x50, 0x37, 0x3d, 0xcf, 0x8f, 0x4c, 0x71, 0xcd, 0x5a, 0x5e, 0x68, 0x33, 0xbc, - 0xf2, 0x8f, 0xdb, 0x9d, 0x29, 0x92, 0x08, 0xcc, 0x69, 0xec, 0xd6, 0x4f, 0x41, 0xbb, 0xda, 0x61, - 0x91, 0xed, 0xf0, 0x07, 0x3f, 0x9a, 0xee, 0x86, 0x84, 0xad, 0x0b, 0xf9, 0x3e, 0xa1, 0xad, 0x30, - 0x02, 0x1f, 0xf7, 0xfb, 0x7b, 0xfd, 0x27, 0x9a, 0x82, 0x00, 0xca, 0xbd, 0x9f, 0xed, 0x0d, 0x7b, - 0x3b, 0x5a, 0x61, 0xfb, 0x57, 0x1b, 0x50, 0x16, 0x42, 0xa2, 0x6f, 0x65, 0x26, 0x90, 0xae, 0x1c, - 0x45, 0x3f, 0x5d, 
0x38, 0xa3, 0xce, 0x54, 0xa3, 0xb6, 0x1e, 0x2d, 0x3d, 0x5e, 0xbe, 0xd4, 0xad, - 0xa0, 0xbf, 0x51, 0x60, 0x35, 0xf3, 0x4a, 0x97, 0xf7, 0x9a, 0x79, 0x4e, 0xa1, 0x6a, 0xeb, 0x27, - 0x4b, 0x8d, 0x4d, 0x64, 0xf9, 0x46, 0x81, 0x7a, 0xaa, 0x44, 0x13, 0x3d, 0x58, 0xa6, 0xac, 0x53, - 0x48, 0xf2, 0xe9, 0xf2, 0x15, 0xa1, 0xfa, 0xca, 0x47, 0x0a, 0xfa, 0x6b, 0x05, 0xea, 0xa9, 0x62, - 0xc5, 0xdc, 0xa2, 0xcc, 0x96, 0x56, 0xe6, 0x16, 0x65, 0x5e, 0x6d, 0xe4, 0x0a, 0xfa, 0x4b, 0x05, - 0x6a, 0x49, 0xe1, 0x21, 0xba, 0xbf, 0x78, 0xa9, 0xa2, 0x10, 0xe2, 0x93, 0x65, 0x6b, 0x1c, 0xf5, - 0x15, 0xf4, 0xe7, 0x50, 0x8d, 0xab, 0xf4, 0x50, 0xde, 0xdd, 0xeb, 0x4a, 0x09, 0x60, 0xeb, 0xfe, - 0xc2, 0xe3, 0xd2, 0xd3, 0xc7, 0xa5, 0x73, 0xb9, 0xa7, 0xbf, 0x52, 0xe4, 0xd7, 0xba, 0xbf, 0xf0, - 0xb8, 0x64, 0x7a, 0xe6, 0x09, 0xa9, 0x0a, 0xbb, 0xdc, 0x9e, 0x30, 0x5b, 0xda, 0x97, 0xdb, 0x13, - 0xe6, 0x15, 0xf4, 0x09, 0x41, 0x52, 0x35, 0x7a, 0xb9, 0x05, 0x99, 0xad, 0x03, 0xcc, 0x2d, 0xc8, - 0x9c, 0x92, 0x40, 0x7d, 0x05, 0xfd, 0x42, 0x49, 0x9f, 0x0b, 0xee, 0x2f, 0x5c, 0x8a, 0xb6, 0xa0, - 0x4b, 0xce, 0x14, 0xc3, 0xf1, 0x05, 0xfa, 0x0b, 0x79, 0x8b, 0x21, 0x2a, 0xd9, 0xd0, 0x22, 0x60, - 0x99, 0xe2, 0xb7, 0xd6, 0xc7, 0xcb, 0x6d, 0x36, 0x5c, 0x88, 0xbf, 0x52, 0x00, 0xa6, 0x35, 0x6f, - 0xb9, 0x85, 0x98, 0x29, 0xb6, 0x6b, 0x3d, 0x58, 0x62, 0x64, 0x7a, 0x81, 0xc4, 0x35, 0x39, 0xb9, - 0x17, 0xc8, 0x95, 0x9a, 0xbc, 0xdc, 0x0b, 0xe4, 0x6a, 0x3d, 0x9d, 0xbe, 0x82, 0xfe, 0x49, 0x81, - 0x8d, 0x99, 0x9a, 0x20, 0xf4, 0xe8, 0x9a, 0x65, 0x61, 0xad, 0xcf, 0x97, 0x07, 0x88, 0x45, 0xdb, - 0x52, 0x3e, 0x52, 0xd0, 0xdf, 0x2a, 0xb0, 0x96, 0xad, 0x95, 0xc8, 0xbd, 0x4b, 0xcd, 0xa9, 0x2e, - 0x6a, 0x3d, 0x5c, 0x6e, 0x70, 0xa2, 0xad, 0xbf, 0x57, 0xa0, 0x91, 0x2d, 0x9b, 0x41, 0x0f, 0x17, - 0x0b, 0x0b, 0x57, 0x04, 0xfa, 0x6c, 0xc9, 0xd1, 0xb1, 0x44, 0x5f, 0x54, 0xfe, 0xa8, 0x24, 0xb2, - 0xb7, 0x32, 0xff, 0xf9, 0xf1, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x4e, 0xae, 0x7e, 0xe0, - 0x33, 0x00, 0x00, + // 3851 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x73, 0x1b, 0x47, + 0x76, 0xe7, 0xe0, 0x8b, 0xc0, 0x03, 0x09, 0x0e, 0x9b, 0xa4, 0x05, 0xc1, 0x9b, 0x58, 0x3b, 0x29, + 0xa7, 0x98, 0x5d, 0x1b, 0xf2, 0x72, 0x2b, 0x96, 0xe5, 0x95, 0x57, 0x86, 0x41, 0x48, 0x84, 0x45, + 0x82, 0x4c, 0x03, 0x2c, 0xad, 0xa2, 0xc4, 0x93, 0xe1, 0x4c, 0x0b, 0x1c, 0x11, 0xf3, 0xe1, 0xe9, + 0x01, 0x45, 0x6e, 0x2a, 0x95, 0xd4, 0xa6, 0x2a, 0xe5, 0x54, 0x25, 0x95, 0x5c, 0x9c, 0xbd, 0xe4, + 0xb4, 0x55, 0x39, 0xe5, 0x1f, 0x48, 0x6d, 0x6a, 0x4f, 0x39, 0xe4, 0x8f, 0x48, 0x2e, 0xb9, 0xe5, + 0x9a, 0x53, 0xae, 0x5b, 0xfd, 0x31, 0x83, 0x19, 0x00, 0xb2, 0x06, 0xa0, 0x4e, 0x98, 0xf7, 0xba, + 0xfb, 0xd7, 0x0f, 0xef, 0xbd, 0x7e, 0xfd, 0xba, 0xfb, 0x81, 0xe6, 0x8f, 0xc6, 0x43, 0xdb, 0xa5, + 0x77, 0xad, 0xc0, 0xbe, 0x24, 0x01, 0xbd, 0xeb, 0x07, 0x5e, 0xe8, 0x49, 0xaa, 0xc9, 0x09, 0xf4, + 0xfe, 0xb9, 0x41, 0xcf, 0x6d, 0xd3, 0x0b, 0xfc, 0xa6, 0xeb, 0x39, 0x86, 0xd5, 0x94, 0x63, 0x9a, + 0x72, 0x8c, 0xe8, 0xd6, 0xf8, 0xdd, 0xa1, 0xe7, 0x0d, 0x47, 0x44, 0x20, 0x9c, 0x8d, 0x5f, 0xdc, + 0xb5, 0xc6, 0x81, 0x11, 0xda, 0x9e, 0x2b, 0xdb, 0xdf, 0x9b, 0x6e, 0x0f, 0x6d, 0x87, 0xd0, 0xd0, + 0x70, 0x7c, 0xd9, 0xe1, 0xfd, 0x48, 0x16, 0x7a, 0x6e, 0x04, 0xc4, 0xba, 0x7b, 0x6e, 0x8e, 0xa8, + 0x4f, 0x4c, 0xf6, 0xab, 0xb3, 0x0f, 0xd9, 0xed, 0x83, 0xa9, 0x6e, 0x34, 0x0c, 0xc6, 0x66, 0x18, + 0x49, 0x6e, 0x84, 0x61, 0x60, 0x9f, 0x8d, 0x43, 0x22, 0x7a, 0x6b, 0xb7, 0xe1, 0xd6, 0xc0, 0xa0, + 0x17, 0x6d, 0xcf, 0x7d, 0x61, 0x0f, 0xfb, 0xe6, 0x39, 0x71, 0x0c, 0x4c, 
0xbe, 0x1e, 0x13, 0x1a, + 0x6a, 0x7f, 0x02, 0xf5, 0xd9, 0x26, 0xea, 0x7b, 0x2e, 0x25, 0xe8, 0x73, 0x28, 0xb0, 0x29, 0xeb, + 0xca, 0x1d, 0x65, 0xb7, 0xba, 0xf7, 0x41, 0xf3, 0x75, 0x2a, 0x10, 0x32, 0x34, 0xa5, 0xa8, 0xcd, + 0xbe, 0x4f, 0x4c, 0xcc, 0x47, 0x6a, 0x3b, 0xb0, 0xd5, 0x36, 0x7c, 0xe3, 0xcc, 0x1e, 0xd9, 0xa1, + 0x4d, 0x68, 0x34, 0xe9, 0x18, 0xb6, 0xd3, 0x6c, 0x39, 0xe1, 0x9f, 0xc2, 0x9a, 0x99, 0xe0, 0xcb, + 0x89, 0xef, 0x37, 0x33, 0xe9, 0xbe, 0xb9, 0xcf, 0xa9, 0x14, 0x70, 0x0a, 0x4e, 0xdb, 0x06, 0xf4, + 0xc8, 0x76, 0x87, 0x24, 0xf0, 0x03, 0xdb, 0x0d, 0x23, 0x61, 0x7e, 0x93, 0x87, 0xad, 0x14, 0x5b, + 0x0a, 0xf3, 0x12, 0x20, 0xd6, 0x23, 0x13, 0x25, 0xbf, 0x5b, 0xdd, 0xfb, 0x32, 0xa3, 0x28, 0x73, + 0xf0, 0x9a, 0xad, 0x18, 0xac, 0xe3, 0x86, 0xc1, 0x35, 0x4e, 0xa0, 0xa3, 0xaf, 0xa0, 0x74, 0x4e, + 0x8c, 0x51, 0x78, 0x5e, 0xcf, 0xdd, 0x51, 0x76, 0x6b, 0x7b, 0x8f, 0x6e, 0x30, 0xcf, 0x01, 0x07, + 0xea, 0x87, 0x46, 0x48, 0xb0, 0x44, 0x45, 0x1f, 0x02, 0x12, 0x5f, 0xba, 0x45, 0xa8, 0x19, 0xd8, + 0x3e, 0x73, 0xc9, 0x7a, 0xfe, 0x8e, 0xb2, 0x5b, 0xc1, 0x9b, 0xa2, 0x65, 0x7f, 0xd2, 0xd0, 0xf0, + 0x61, 0x63, 0x4a, 0x5a, 0xa4, 0x42, 0xfe, 0x82, 0x5c, 0x73, 0x8b, 0x54, 0x30, 0xfb, 0x44, 0x8f, + 0xa1, 0x78, 0x69, 0x8c, 0xc6, 0x84, 0x8b, 0x5c, 0xdd, 0xfb, 0xd1, 0x9b, 0xdc, 0x43, 0xba, 0xe8, + 0x44, 0x0f, 0x58, 0x8c, 0xff, 0x34, 0xf7, 0x89, 0xa2, 0xdd, 0x87, 0x6a, 0x42, 0x6e, 0x54, 0x03, + 0x38, 0xed, 0xed, 0x77, 0x06, 0x9d, 0xf6, 0xa0, 0xb3, 0xaf, 0xae, 0xa0, 0x75, 0xa8, 0x9c, 0xf6, + 0x0e, 0x3a, 0xad, 0xc3, 0xc1, 0xc1, 0x33, 0x55, 0x41, 0x55, 0x58, 0x8d, 0x88, 0x9c, 0x76, 0x05, + 0x08, 0x13, 0xd3, 0xbb, 0x24, 0x01, 0x73, 0x64, 0x69, 0x55, 0x74, 0x0b, 0x56, 0x43, 0x83, 0x5e, + 0xe8, 0xb6, 0x25, 0x65, 0x2e, 0x31, 0xb2, 0x6b, 0xa1, 0x2e, 0x94, 0xce, 0x0d, 0xd7, 0x1a, 0xbd, + 0x59, 0xee, 0xb4, 0xaa, 0x19, 0xf8, 0x01, 0x1f, 0x88, 0x25, 0x00, 0xf3, 0xee, 0xd4, 0xcc, 0xc2, + 0x00, 0xda, 0x33, 0x50, 0xfb, 0xa1, 0x11, 0x84, 0x49, 0x71, 0x3a, 0x50, 0x60, 0xf3, 0x4b, 0x8f, + 0x5e, 0x64, 0x4e, 0xb1, 0x32, 0x31, 0x1f, 0xae, 0xfd, 0x5f, 0x0e, 0x36, 0x13, 0xd8, 0xd2, 0x53, + 0x9f, 0x42, 0x29, 0x20, 0x74, 0x3c, 0x0a, 0x39, 0x7c, 0x6d, 0xef, 0x61, 0x46, 0xf8, 0x19, 0xa4, + 0x26, 0xe6, 0x30, 0x58, 0xc2, 0xa1, 0x5d, 0x50, 0xc5, 0x08, 0x9d, 0x04, 0x81, 0x17, 0xe8, 0x0e, + 0x1d, 0x72, 0xad, 0x55, 0x70, 0x4d, 0xf0, 0x3b, 0x8c, 0x7d, 0x44, 0x87, 0x09, 0xad, 0xe6, 0x6f, + 0xa8, 0x55, 0x64, 0x80, 0xea, 0x92, 0xf0, 0x95, 0x17, 0x5c, 0xe8, 0x4c, 0xb5, 0x81, 0x6d, 0x91, + 0x7a, 0x81, 0x83, 0x7e, 0x9c, 0x11, 0xb4, 0x27, 0x86, 0x1f, 0xcb, 0xd1, 0x78, 0xc3, 0x4d, 0x33, + 0xb4, 0x1f, 0x42, 0x49, 0xfc, 0x53, 0xe6, 0x49, 0xfd, 0xd3, 0x76, 0xbb, 0xd3, 0xef, 0xab, 0x2b, + 0xa8, 0x02, 0x45, 0xdc, 0x19, 0x60, 0xe6, 0x61, 0x15, 0x28, 0x3e, 0x6a, 0x0d, 0x5a, 0x87, 0x6a, + 0x4e, 0xfb, 0x01, 0x6c, 0x3c, 0x35, 0xec, 0x30, 0x8b, 0x73, 0x69, 0x1e, 0xa8, 0x93, 0xbe, 0xd2, + 0x3a, 0xdd, 0x94, 0x75, 0xb2, 0xab, 0xa6, 0x73, 0x65, 0x87, 0x53, 0xf6, 0x50, 0x21, 0x4f, 0x82, + 0x40, 0x9a, 0x80, 0x7d, 0x6a, 0xaf, 0x60, 0xa3, 0x1f, 0x7a, 0x7e, 0x26, 0xcf, 0xff, 0x31, 0xac, + 0xb2, 0xdd, 0xc6, 0x1b, 0x87, 0xd2, 0xf5, 0x6f, 0x37, 0xc5, 0x6e, 0xd4, 0x8c, 0x76, 0xa3, 0xe6, + 0xbe, 0xdc, 0xad, 0x70, 0xd4, 0x13, 0xbd, 0x03, 0x25, 0x6a, 0x0f, 0x5d, 0x63, 0x24, 0xa3, 0x85, + 0xa4, 0x34, 0xc4, 0x9c, 0x3c, 0x9a, 0x58, 0x3a, 0x7e, 0x1b, 0xd0, 0x3e, 0xa1, 0x61, 0xe0, 0x5d, + 0x67, 0x92, 0x67, 0x1b, 0x8a, 0x2f, 0xbc, 0xc0, 0x14, 0x0b, 0xb1, 0x8c, 0x05, 0xc1, 0x16, 0x55, + 0x0a, 0x44, 0x62, 0x7f, 0x08, 0xa8, 0xeb, 0xb2, 0x3d, 0x25, 0x9b, 0x21, 0xfe, 0x31, 0x07, 0x5b, + 
0xa9, 0xfe, 0xd2, 0x18, 0xcb, 0xaf, 0x43, 0x16, 0x98, 0xc6, 0x54, 0xac, 0x43, 0x74, 0x0c, 0x25, + 0xd1, 0x43, 0x6a, 0xf2, 0xde, 0x02, 0x40, 0x62, 0x9b, 0x92, 0x70, 0x12, 0x66, 0xae, 0xd3, 0xe7, + 0xdf, 0xae, 0xd3, 0xbf, 0x02, 0x35, 0xfa, 0x1f, 0xf4, 0x8d, 0xb6, 0xf9, 0x12, 0xb6, 0x4c, 0x6f, + 0x34, 0x22, 0x26, 0xf3, 0x06, 0xdd, 0x76, 0x43, 0x12, 0x5c, 0x1a, 0xa3, 0x37, 0xfb, 0x0d, 0x9a, + 0x8c, 0xea, 0xca, 0x41, 0xda, 0x73, 0xd8, 0x4c, 0x4c, 0x2c, 0x0d, 0xf1, 0x08, 0x8a, 0x94, 0x31, + 0xa4, 0x25, 0x3e, 0x5a, 0xd0, 0x12, 0x14, 0x8b, 0xe1, 0xda, 0x96, 0x00, 0xef, 0x5c, 0x12, 0x37, + 0xfe, 0x5b, 0xda, 0x3e, 0x6c, 0xf6, 0xb9, 0x9b, 0x66, 0xf2, 0xc3, 0x89, 0x8b, 0xe7, 0x52, 0x2e, + 0xbe, 0x0d, 0x28, 0x89, 0x22, 0x1d, 0xf1, 0x1a, 0x36, 0x3a, 0x57, 0xc4, 0xcc, 0x84, 0x5c, 0x87, + 0x55, 0xd3, 0x73, 0x1c, 0xc3, 0xb5, 0xea, 0xb9, 0x3b, 0xf9, 0xdd, 0x0a, 0x8e, 0xc8, 0xe4, 0x5a, + 0xcc, 0x67, 0x5d, 0x8b, 0xda, 0xdf, 0x2b, 0xa0, 0x4e, 0xe6, 0x96, 0x8a, 0x64, 0xd2, 0x87, 0x16, + 0x03, 0x62, 0x73, 0xaf, 0x61, 0x49, 0x49, 0x7e, 0x14, 0x2e, 0x04, 0x9f, 0x04, 0x41, 0x22, 0x1c, + 0xe5, 0x6f, 0x18, 0x8e, 0xb4, 0x03, 0xf8, 0x5e, 0x24, 0x4e, 0x3f, 0x0c, 0x88, 0xe1, 0xd8, 0xee, + 0xb0, 0x7b, 0x7c, 0xec, 0x13, 0x21, 0x38, 0x42, 0x50, 0xb0, 0x8c, 0xd0, 0x90, 0x82, 0xf1, 0x6f, + 0xb6, 0xe8, 0xcd, 0x91, 0x47, 0xe3, 0x45, 0xcf, 0x09, 0xed, 0x3f, 0xf3, 0x50, 0x9f, 0x81, 0x8a, + 0xd4, 0xfb, 0x1c, 0x8a, 0x94, 0x84, 0x63, 0x5f, 0xba, 0x4a, 0x27, 0xb3, 0xc0, 0xf3, 0xf1, 0x9a, + 0x7d, 0x06, 0x86, 0x05, 0x26, 0x1a, 0x42, 0x39, 0x0c, 0xaf, 0x75, 0x6a, 0xff, 0x3c, 0x4a, 0x08, + 0x0e, 0x6f, 0x8a, 0x3f, 0x20, 0x81, 0x63, 0xbb, 0xc6, 0xa8, 0x6f, 0xff, 0x9c, 0xe0, 0xd5, 0x30, + 0xbc, 0x66, 0x1f, 0xe8, 0x19, 0x73, 0x78, 0xcb, 0x76, 0xa5, 0xda, 0xdb, 0xcb, 0xce, 0x92, 0x50, + 0x30, 0x16, 0x88, 0x8d, 0x43, 0x28, 0xf2, 0xff, 0xb4, 0x8c, 0x23, 0xaa, 0x90, 0x0f, 0xc3, 0x6b, + 0x2e, 0x54, 0x19, 0xb3, 0xcf, 0xc6, 0x03, 0x58, 0x4b, 0xfe, 0x03, 0xe6, 0x48, 0xe7, 0xc4, 0x1e, + 0x9e, 0x0b, 0x07, 0x2b, 0x62, 0x49, 0x31, 0x4b, 0xbe, 0xb2, 0x2d, 0x99, 0xb2, 0x16, 0xb1, 0x20, + 0xb4, 0x7f, 0xcb, 0xc1, 0xed, 0x39, 0x9a, 0x91, 0xce, 0xfa, 0x3c, 0xe5, 0xac, 0x6f, 0x49, 0x0b, + 0x91, 0xc7, 0x3f, 0x4f, 0x79, 0xfc, 0x5b, 0x04, 0x67, 0xcb, 0xe6, 0x1d, 0x28, 0x91, 0x2b, 0x3b, + 0x24, 0x96, 0x54, 0x95, 0xa4, 0x12, 0xcb, 0xa9, 0x70, 0xd3, 0xe5, 0x74, 0x04, 0xdb, 0xed, 0x80, + 0x18, 0x21, 0x91, 0xa1, 0x3c, 0xf2, 0xff, 0xdb, 0x50, 0x36, 0x46, 0x23, 0xcf, 0x9c, 0x98, 0x75, + 0x95, 0xd3, 0x5d, 0x0b, 0x35, 0xa0, 0x7c, 0xee, 0xd1, 0xd0, 0x35, 0x1c, 0x22, 0x83, 0x57, 0x4c, + 0x6b, 0xdf, 0x2a, 0xb0, 0x33, 0x85, 0x27, 0xad, 0x70, 0x06, 0x35, 0x9b, 0x7a, 0x23, 0xfe, 0x07, + 0xf5, 0xc4, 0x09, 0xef, 0x27, 0x8b, 0x6d, 0x35, 0xdd, 0x08, 0x83, 0x1f, 0xf8, 0xd6, 0xed, 0x24, + 0xc9, 0x3d, 0x8e, 0x4f, 0x6e, 0xc9, 0x95, 0x1e, 0x91, 0xda, 0x3f, 0x29, 0xb0, 0x23, 0x77, 0xf8, + 0xec, 0x7f, 0x74, 0x56, 0xe4, 0xdc, 0xdb, 0x16, 0x59, 0xab, 0xc3, 0x3b, 0xd3, 0x72, 0xc9, 0x98, + 0xff, 0xff, 0x05, 0x40, 0xb3, 0xa7, 0x4b, 0xf4, 0x7d, 0x58, 0xa3, 0xc4, 0xb5, 0x74, 0xb1, 0x5f, + 0x88, 0xad, 0xac, 0x8c, 0xab, 0x8c, 0x27, 0x36, 0x0e, 0xca, 0x42, 0x20, 0xb9, 0x92, 0xd2, 0x96, + 0x31, 0xff, 0x46, 0xe7, 0xb0, 0xf6, 0x82, 0xea, 0xf1, 0xdc, 0xdc, 0xa1, 0x6a, 0x99, 0xc3, 0xda, + 0xac, 0x1c, 0xcd, 0x47, 0xfd, 0xf8, 0x7f, 0xe1, 0xea, 0x0b, 0x1a, 0x13, 0xe8, 0x1b, 0x05, 0x6e, + 0x45, 0x69, 0xc5, 0x44, 0x7d, 0x8e, 0x67, 0x11, 0x5a, 0x2f, 0xdc, 0xc9, 0xef, 0xd6, 0xf6, 0x4e, + 0x6e, 0xa0, 0xbf, 0x19, 0xe6, 0x91, 0x67, 0x11, 0xbc, 0xe3, 0xce, 0xe1, 0x52, 0xd4, 0x84, 0x2d, + 0x67, 0x4c, 0x43, 0x5d, 
0x78, 0x81, 0x2e, 0x3b, 0xd5, 0x8b, 0x5c, 0x2f, 0x9b, 0xac, 0x29, 0xe5, + 0xab, 0xe8, 0x02, 0xd6, 0x1d, 0x6f, 0xec, 0x86, 0xba, 0xc9, 0xcf, 0x3f, 0xb4, 0x5e, 0x5a, 0xe8, + 0x60, 0x3c, 0x47, 0x4b, 0x47, 0x0c, 0x4e, 0x9c, 0xa6, 0x28, 0x5e, 0x73, 0x12, 0x14, 0x33, 0x64, + 0x40, 0x1c, 0x2f, 0x24, 0x3a, 0x8b, 0x97, 0xb4, 0xbe, 0x2a, 0x0c, 0x29, 0x78, 0x2c, 0x34, 0x50, + 0xad, 0x09, 0xd5, 0x84, 0x9a, 0x51, 0x19, 0x0a, 0xbd, 0xe3, 0x5e, 0x47, 0x5d, 0x41, 0x00, 0xa5, + 0xf6, 0x01, 0x3e, 0x3e, 0x1e, 0x88, 0x53, 0x43, 0xf7, 0xa8, 0xf5, 0xb8, 0xa3, 0xe6, 0xb4, 0x0e, + 0xac, 0x25, 0x27, 0x44, 0x08, 0x6a, 0xa7, 0xbd, 0x27, 0xbd, 0xe3, 0xa7, 0x3d, 0xfd, 0xe8, 0xf8, + 0xb4, 0x37, 0x60, 0xe7, 0x8d, 0x1a, 0x40, 0xab, 0xf7, 0x6c, 0x42, 0xaf, 0x43, 0xa5, 0x77, 0x1c, + 0x91, 0x4a, 0x23, 0xa7, 0x2a, 0xda, 0x7f, 0xe4, 0x61, 0x7b, 0x9e, 0xee, 0x91, 0x05, 0x05, 0x66, + 0x47, 0x79, 0xe2, 0x7b, 0xfb, 0x66, 0xe4, 0xe8, 0xcc, 0x7d, 0x7d, 0x43, 0x86, 0xf8, 0x0a, 0xe6, + 0xdf, 0x48, 0x87, 0xd2, 0xc8, 0x38, 0x23, 0x23, 0x5a, 0xcf, 0xf3, 0x3b, 0x91, 0xc7, 0x37, 0x99, + 0xfb, 0x90, 0x23, 0x89, 0x0b, 0x11, 0x09, 0x8b, 0x06, 0x50, 0x65, 0x41, 0x8c, 0x0a, 0xd5, 0xc9, + 0xb8, 0xba, 0x97, 0x71, 0x96, 0x83, 0xc9, 0x48, 0x9c, 0x84, 0x69, 0xdc, 0x87, 0x6a, 0x62, 0xb2, + 0x39, 0xf7, 0x19, 0xdb, 0xc9, 0xfb, 0x8c, 0x4a, 0xf2, 0x72, 0xe2, 0xe1, 0xac, 0x0d, 0x98, 0x8e, + 0x98, 0x13, 0x1c, 0x1c, 0xf7, 0x07, 0xe2, 0xe4, 0xf8, 0x18, 0x1f, 0x9f, 0x9e, 0xa8, 0x0a, 0x63, + 0x0e, 0x5a, 0xfd, 0x27, 0x6a, 0x2e, 0xf6, 0x91, 0xbc, 0xd6, 0x86, 0x6a, 0x42, 0xae, 0x54, 0xd4, + 0x56, 0xd2, 0x51, 0x9b, 0xc5, 0x4d, 0xc3, 0xb2, 0x02, 0x42, 0xa9, 0x94, 0x23, 0x22, 0xb5, 0xe7, + 0x50, 0xd9, 0xef, 0xf5, 0x25, 0x44, 0x1d, 0x56, 0x29, 0x09, 0xd8, 0xff, 0xe6, 0x37, 0x53, 0x15, + 0x1c, 0x91, 0x0c, 0x9c, 0x12, 0x23, 0x30, 0xcf, 0x09, 0x95, 0x7b, 0x7d, 0x4c, 0xb3, 0x51, 0x1e, + 0xbf, 0xe1, 0x11, 0xb6, 0xab, 0xe0, 0x88, 0xd4, 0xfe, 0xab, 0x0c, 0x30, 0xb9, 0x6d, 0x40, 0x35, + 0xc8, 0xc5, 0x31, 0x38, 0x67, 0x5b, 0xcc, 0x0f, 0x12, 0x7b, 0x0c, 0xff, 0x46, 0x7b, 0xb0, 0xe3, + 0xd0, 0xa1, 0x6f, 0x98, 0x17, 0xba, 0xbc, 0x24, 0x10, 0x4b, 0x95, 0xc7, 0xb3, 0x35, 0xbc, 0x25, + 0x1b, 0xe5, 0x4a, 0x14, 0xb8, 0x87, 0x90, 0x27, 0xee, 0x25, 0x8f, 0x3d, 0xd5, 0xbd, 0x4f, 0x17, + 0xbe, 0x05, 0x69, 0x76, 0xdc, 0x4b, 0xe1, 0x2b, 0x0c, 0x06, 0xe9, 0x00, 0x16, 0xb9, 0xb4, 0x4d, + 0xa2, 0x33, 0xd0, 0x22, 0x07, 0xfd, 0x7c, 0x71, 0xd0, 0x7d, 0x8e, 0x11, 0x43, 0x57, 0xac, 0x88, + 0x46, 0x3d, 0xa8, 0x04, 0x84, 0x7a, 0xe3, 0xc0, 0x24, 0x22, 0x00, 0x65, 0x3f, 0xa8, 0xe0, 0x68, + 0x1c, 0x9e, 0x40, 0xa0, 0x7d, 0x28, 0xf1, 0xb8, 0xc3, 0x22, 0x4c, 0xfe, 0x3b, 0xaf, 0x54, 0xd3, + 0x60, 0x3c, 0x92, 0x60, 0x39, 0x16, 0x3d, 0x86, 0x55, 0x21, 0x22, 0xad, 0x97, 0x39, 0xcc, 0x87, + 0x59, 0x83, 0x22, 0x1f, 0x85, 0xa3, 0xd1, 0xcc, 0xaa, 0x63, 0x4a, 0x82, 0x7a, 0x45, 0x58, 0x95, + 0x7d, 0xa3, 0x77, 0xa1, 0x22, 0xf6, 0x60, 0xcb, 0x0e, 0xea, 0x20, 0x9c, 0x93, 0x33, 0xf6, 0xed, + 0x00, 0xbd, 0x07, 0x55, 0x91, 0x6b, 0xe9, 0x3c, 0x2a, 0x54, 0x79, 0x33, 0x08, 0xd6, 0x09, 0x8b, + 0x0d, 0xa2, 0x03, 0x09, 0x02, 0xd1, 0x61, 0x2d, 0xee, 0x40, 0x82, 0x80, 0x77, 0xf8, 0x7d, 0xd8, + 0xe0, 0x19, 0xea, 0x30, 0xf0, 0xc6, 0xbe, 0xce, 0x7d, 0x6a, 0x9d, 0x77, 0x5a, 0x67, 0xec, 0xc7, + 0x8c, 0xdb, 0x63, 0xce, 0x75, 0x1b, 0xca, 0x2f, 0xbd, 0x33, 0xd1, 0xa1, 0x26, 0xd6, 0xc1, 0x4b, + 0xef, 0x2c, 0x6a, 0x8a, 0xb3, 0x84, 0x8d, 0x74, 0x96, 0xf0, 0x35, 0xbc, 0x33, 0xbb, 0xdd, 0xf1, + 0x6c, 0x41, 0xbd, 0x79, 0xb6, 0xb0, 0xed, 0xce, 0x8b, 0xc3, 0x5f, 0x40, 0xde, 0x72, 0x69, 0x7d, + 0x73, 0x21, 0xe7, 0x88, 0xd7, 0x31, 0x66, 0x83, 
0xd1, 0x0e, 0x94, 0xd8, 0x9f, 0xb5, 0xad, 0x3a, + 0x12, 0xa1, 0xe7, 0xa5, 0x77, 0xd6, 0xb5, 0xd0, 0xf7, 0xa0, 0xc2, 0xfe, 0x3f, 0xf5, 0x0d, 0x93, + 0xd4, 0xb7, 0x78, 0xcb, 0x84, 0xc1, 0x0c, 0xe5, 0x7a, 0x16, 0x11, 0x2a, 0xda, 0x16, 0x86, 0x62, + 0x0c, 0xae, 0xa3, 0x5b, 0xb0, 0xca, 0x1b, 0x6d, 0xab, 0xbe, 0x23, 0x0e, 0x02, 0x8c, 0xec, 0x5a, + 0x8d, 0x8f, 0xa1, 0x1c, 0x39, 0xfa, 0x22, 0x21, 0xb0, 0xf1, 0x00, 0x6a, 0xe9, 0x65, 0xb2, 0x50, + 0x00, 0xfd, 0x97, 0x1c, 0x54, 0xe2, 0x05, 0x81, 0x5c, 0xd8, 0xe2, 0x06, 0x63, 0xd9, 0xa0, 0x3e, + 0x59, 0x5f, 0x22, 0x07, 0xfd, 0x2c, 0xa3, 0x0a, 0x5b, 0x11, 0x82, 0x3c, 0x0c, 0xcb, 0xc5, 0x86, + 0x62, 0xe4, 0xc9, 0x7c, 0x5f, 0xc1, 0xc6, 0xc8, 0x76, 0xc7, 0x57, 0x89, 0xb9, 0x44, 0xf2, 0xf8, + 0x87, 0x19, 0xe7, 0x3a, 0x64, 0xa3, 0x27, 0x73, 0xd4, 0x46, 0x29, 0x1a, 0x1d, 0x40, 0xd1, 0xf7, + 0x82, 0x30, 0xda, 0x0f, 0xb3, 0xee, 0x54, 0x27, 0x5e, 0x10, 0x1e, 0x19, 0xbe, 0xcf, 0xce, 0x47, + 0x02, 0x40, 0xfb, 0x36, 0x07, 0xef, 0xcc, 0xff, 0x63, 0xa8, 0x07, 0x79, 0xd3, 0x1f, 0x4b, 0x25, + 0x3d, 0x58, 0x54, 0x49, 0x6d, 0x7f, 0x3c, 0x91, 0x9f, 0x01, 0xa1, 0xa7, 0x50, 0x72, 0x88, 0xe3, + 0x05, 0xd7, 0x52, 0x17, 0x0f, 0x17, 0x85, 0x3c, 0xe2, 0xa3, 0x27, 0xa8, 0x12, 0x0e, 0x61, 0x28, + 0xcb, 0x85, 0x42, 0x65, 0x48, 0x5e, 0xf0, 0x06, 0x2b, 0x82, 0xc4, 0x31, 0x8e, 0xf6, 0x31, 0xec, + 0xcc, 0xfd, 0x2b, 0xe8, 0x77, 0x00, 0x4c, 0x7f, 0xac, 0xf3, 0x17, 0x06, 0xe1, 0x41, 0x79, 0x5c, + 0x31, 0xfd, 0x71, 0x9f, 0x33, 0xb4, 0xe7, 0x50, 0x7f, 0x9d, 0xbc, 0x6c, 0xfd, 0x08, 0x89, 0x75, + 0xe7, 0x8c, 0xeb, 0x20, 0x8f, 0xcb, 0x82, 0x71, 0x74, 0x86, 0x34, 0x58, 0x8f, 0x1a, 0x8d, 0x2b, + 0xd6, 0x21, 0xcf, 0x3b, 0x54, 0x65, 0x07, 0xe3, 0xea, 0xe8, 0x4c, 0xfb, 0x65, 0x0e, 0x36, 0xa6, + 0x44, 0x66, 0xa7, 0x44, 0x11, 0x5c, 0xa3, 0xf3, 0xb7, 0xa0, 0x58, 0xa4, 0x35, 0x6d, 0x2b, 0xba, + 0xb9, 0xe5, 0xdf, 0x7c, 0x8f, 0xf5, 0xe5, 0xad, 0x6a, 0xce, 0xf6, 0xd9, 0xf2, 0x71, 0xce, 0xec, + 0x90, 0xf2, 0x84, 0xa7, 0x88, 0x05, 0x81, 0x9e, 0x41, 0x2d, 0x20, 0x7c, 0x6f, 0xb7, 0x74, 0xe1, + 0x65, 0xc5, 0x85, 0xbc, 0x4c, 0x4a, 0xc8, 0x9c, 0x0d, 0xaf, 0x47, 0x48, 0x8c, 0xa2, 0xe8, 0x29, + 0xac, 0x5b, 0xd7, 0xae, 0xe1, 0xd8, 0xa6, 0x44, 0x2e, 0x2d, 0x8d, 0xbc, 0x26, 0x81, 0x38, 0xb0, + 0x76, 0x1f, 0xaa, 0x89, 0x46, 0xf6, 0xc7, 0x78, 0x66, 0x27, 0x75, 0x22, 0x88, 0x74, 0xb4, 0x28, + 0xca, 0x68, 0xa1, 0x9d, 0x41, 0x35, 0xb1, 0x2e, 0x16, 0x19, 0xca, 0xf4, 0x19, 0x7a, 0x5c, 0x9f, + 0x45, 0x9c, 0x0b, 0x3d, 0x16, 0x03, 0x59, 0x56, 0xa5, 0xdb, 0x3e, 0xd7, 0x68, 0x05, 0x97, 0x18, + 0xd9, 0xf5, 0xb5, 0x5f, 0xe7, 0xa0, 0x96, 0x5e, 0xd2, 0x91, 0x1f, 0xf9, 0x24, 0xb0, 0x3d, 0x2b, + 0xe1, 0x47, 0x27, 0x9c, 0xc1, 0x7c, 0x85, 0x35, 0x7f, 0x3d, 0xf6, 0x42, 0x23, 0xf2, 0x15, 0xd3, + 0x1f, 0xff, 0x11, 0xa3, 0xa7, 0x7c, 0x30, 0x3f, 0xe5, 0x83, 0xe8, 0x03, 0x40, 0xd2, 0x95, 0x46, + 0xb6, 0x63, 0x87, 0xfa, 0xd9, 0x75, 0x48, 0x84, 0x8d, 0xf3, 0x58, 0x15, 0x2d, 0x87, 0xac, 0xe1, + 0x0b, 0xc6, 0x67, 0x8e, 0xe7, 0x79, 0x8e, 0x4e, 0x4d, 0x2f, 0x20, 0xba, 0x61, 0xbd, 0xe4, 0x07, + 0xa4, 0x3c, 0xae, 0x7a, 0x9e, 0xd3, 0x67, 0xbc, 0x96, 0xf5, 0x92, 0x6d, 0xb2, 0xa6, 0x3f, 0xa6, + 0x24, 0xd4, 0xd9, 0x0f, 0xcf, 0x4b, 0x2a, 0x18, 0x04, 0xab, 0xed, 0x8f, 0x29, 0xfa, 0x3d, 0x58, + 0x8f, 0x3a, 0xf0, 0x7d, 0x56, 0x6e, 0xf0, 0x6b, 0xb2, 0x0b, 0xe7, 0x21, 0x0d, 0xd6, 0x4e, 0x48, + 0x60, 0x12, 0x37, 0x1c, 0xd8, 0xe6, 0x05, 0x4b, 0x25, 0x94, 0x5d, 0x05, 0xa7, 0x78, 0x5f, 0x16, + 0xca, 0xab, 0x6a, 0x19, 0x47, 0xb3, 0x39, 0xc4, 0xa1, 0xda, 0x37, 0x0a, 0x14, 0x79, 0x3a, 0xc2, + 0x94, 0xc2, 0xb7, 0x72, 0xbe, 0xd3, 0xcb, 0x34, 0x96, 0x31, 0xf8, 0x3e, 
0xff, 0x2e, 0x54, 0xb8, + 0xf2, 0x13, 0xa7, 0x07, 0x9e, 0xe3, 0xf2, 0xc6, 0x06, 0x94, 0x03, 0x62, 0x58, 0x9e, 0x3b, 0x8a, + 0x2e, 0x9e, 0x62, 0x1a, 0xfd, 0x01, 0xa8, 0x7e, 0xe0, 0xf9, 0xc6, 0x70, 0x72, 0x56, 0x95, 0xe6, + 0xdb, 0x48, 0xf0, 0x59, 0xfa, 0xad, 0x7d, 0x0d, 0x25, 0xb1, 0x27, 0xdd, 0x40, 0x94, 0x0f, 0x01, + 0x09, 0x1d, 0x31, 0xdb, 0x3b, 0x36, 0xa5, 0x32, 0x39, 0xe6, 0x0f, 0xa3, 0xa2, 0xe5, 0x64, 0xd2, + 0xa0, 0xfd, 0xb7, 0x22, 0xd2, 0x64, 0xf1, 0x64, 0xc5, 0xf2, 0x69, 0xb6, 0x20, 0xd8, 0x21, 0x5e, + 0xdc, 0x8d, 0x45, 0x24, 0xea, 0x42, 0x49, 0x66, 0xc3, 0xb9, 0x65, 0x5f, 0xfc, 0x24, 0x40, 0x74, + 0x53, 0x4e, 0xe4, 0x3d, 0xc1, 0xa2, 0x37, 0xe5, 0x44, 0xdc, 0x94, 0x13, 0x76, 0xc8, 0x95, 0x79, + 0xba, 0x80, 0x2b, 0xf0, 0x34, 0xbd, 0x6a, 0xc5, 0xcf, 0x11, 0x44, 0xfb, 0x5f, 0x25, 0x0e, 0x69, + 0xd1, 0xb3, 0x01, 0xfa, 0x0a, 0xca, 0x2c, 0x3a, 0xe8, 0x8e, 0xe1, 0xcb, 0x47, 0xf0, 0xf6, 0x72, + 0x2f, 0x12, 0xd1, 0x86, 0x27, 0xb2, 0xec, 0x55, 0x5f, 0x50, 0x2c, 0x34, 0xb2, 0x13, 0x4e, 0x14, + 0x1a, 0xd9, 0x37, 0x7a, 0x1f, 0x6a, 0xc6, 0x38, 0xf4, 0x74, 0xc3, 0xba, 0x24, 0x41, 0x68, 0x53, + 0x22, 0xdd, 0x64, 0x9d, 0x71, 0x5b, 0x11, 0xb3, 0xf1, 0x29, 0xac, 0x25, 0x31, 0xdf, 0x94, 0x92, + 0x14, 0x93, 0x29, 0xc9, 0x9f, 0x01, 0x4c, 0xae, 0xe0, 0x98, 0x8f, 0x90, 0x2b, 0x3b, 0xd4, 0xcd, + 0xe8, 0x48, 0x5d, 0xc4, 0x65, 0xc6, 0x68, 0xb3, 0x63, 0x5e, 0xfa, 0x7d, 0xa0, 0x18, 0xbd, 0x0f, + 0xb0, 0x85, 0xcf, 0xd6, 0xea, 0x85, 0x3d, 0x1a, 0xc5, 0xd7, 0x82, 0x15, 0xcf, 0x73, 0x9e, 0x70, + 0x86, 0xf6, 0x9b, 0x9c, 0xf0, 0x15, 0xf1, 0xd2, 0x93, 0xe9, 0x48, 0xf5, 0xb6, 0x4c, 0x7d, 0x1f, + 0x80, 0x86, 0x46, 0xc0, 0xf2, 0x2b, 0x23, 0xba, 0x98, 0x6c, 0xcc, 0x3c, 0x30, 0x0c, 0xa2, 0xd2, + 0x13, 0x5c, 0x91, 0xbd, 0x5b, 0x21, 0xfa, 0x0c, 0xd6, 0x4c, 0xcf, 0xf1, 0x47, 0x44, 0x0e, 0x2e, + 0xbe, 0x71, 0x70, 0x35, 0xee, 0xdf, 0x0a, 0x13, 0xd7, 0xa1, 0xa5, 0x9b, 0x5e, 0x87, 0xfe, 0x5a, + 0x11, 0x0f, 0x56, 0xc9, 0xf7, 0x32, 0x34, 0x9c, 0x53, 0x94, 0xf1, 0x78, 0xc9, 0xc7, 0xb7, 0xef, + 0xaa, 0xc8, 0x68, 0x7c, 0x96, 0xa5, 0x04, 0xe2, 0xf5, 0x19, 0xef, 0xbf, 0xe7, 0xa1, 0x12, 0xbf, + 0x55, 0xcd, 0xd8, 0xfe, 0x13, 0xa8, 0xc4, 0x75, 0x3f, 0x32, 0x40, 0x7c, 0xa7, 0x79, 0xe2, 0xce, + 0xe8, 0x05, 0x20, 0x63, 0x38, 0x8c, 0x33, 0x59, 0x7d, 0x4c, 0x8d, 0x61, 0xf4, 0x52, 0xf8, 0xc9, + 0x02, 0x7a, 0x88, 0xb6, 0xbe, 0x53, 0x36, 0x1e, 0xab, 0xc6, 0x70, 0x98, 0xe2, 0xa0, 0x3f, 0x87, + 0x9d, 0xf4, 0x1c, 0xfa, 0xd9, 0xb5, 0xee, 0xdb, 0x96, 0x3c, 0xba, 0x1f, 0x2c, 0xfa, 0x5c, 0xd7, + 0x4c, 0xc1, 0x7f, 0x71, 0x7d, 0x62, 0x5b, 0x42, 0xe7, 0x28, 0x98, 0x69, 0x68, 0xfc, 0x25, 0xdc, + 0x7a, 0x4d, 0xf7, 0x39, 0x36, 0xe8, 0xa5, 0xcb, 0x50, 0x96, 0x57, 0x42, 0xc2, 0x7a, 0xbf, 0x52, + 0xc4, 0xab, 0x62, 0x5a, 0x27, 0xad, 0x64, 0x0a, 0x7e, 0x37, 0xe3, 0x3c, 0xed, 0x93, 0x53, 0x01, + 0xcf, 0xb3, 0xee, 0x2f, 0xa7, 0xb2, 0xee, 0xac, 0xb9, 0x96, 0x48, 0x5e, 0x05, 0x90, 0x44, 0xd0, + 0xfe, 0x35, 0x0f, 0xe5, 0x08, 0x9d, 0x1f, 0xbc, 0xaf, 0x69, 0x48, 0x1c, 0x3d, 0xbe, 0x15, 0x54, + 0x30, 0x08, 0x16, 0xbf, 0xab, 0x7a, 0x17, 0x2a, 0xec, 0x7c, 0x2f, 0x9a, 0x73, 0xbc, 0xb9, 0xcc, + 0x18, 0xbc, 0xf1, 0x3d, 0xa8, 0x86, 0x5e, 0x68, 0x8c, 0xf4, 0x90, 0xa7, 0x02, 0x79, 0x31, 0x9a, + 0xb3, 0x78, 0x22, 0x80, 0x7e, 0x08, 0x9b, 0xe1, 0x79, 0xe0, 0x85, 0xe1, 0x88, 0xa5, 0xa1, 0x3c, + 0x29, 0x12, 0x39, 0x4c, 0x01, 0xab, 0x71, 0x83, 0x48, 0x96, 0x28, 0x8b, 0xde, 0x93, 0xce, 0xcc, + 0x75, 0x79, 0x10, 0x29, 0xe0, 0xf5, 0x98, 0xcb, 0x5c, 0x9b, 0x6d, 0x9e, 0xbe, 0x48, 0x36, 0x78, + 0xac, 0x50, 0x70, 0x44, 0x22, 0x1d, 0x36, 0x1c, 0x62, 0xd0, 0x71, 0x40, 0x2c, 0xfd, 0x85, 0x4d, + 
0x46, 0x96, 0xb8, 0x2f, 0xa9, 0x65, 0x3e, 0x49, 0x44, 0x6a, 0x69, 0x3e, 0xe2, 0xa3, 0x71, 0x2d, + 0x82, 0x13, 0x34, 0xcb, 0x1c, 0xc4, 0x17, 0xda, 0x80, 0x6a, 0xff, 0x59, 0x7f, 0xd0, 0x39, 0xd2, + 0x8f, 0x8e, 0xf7, 0x3b, 0xb2, 0xd2, 0xa8, 0xdf, 0xc1, 0x82, 0x54, 0x58, 0xfb, 0xe0, 0x78, 0xd0, + 0x3a, 0xd4, 0x07, 0xdd, 0xf6, 0x93, 0xbe, 0x9a, 0x43, 0x3b, 0xb0, 0x39, 0x38, 0xc0, 0xc7, 0x83, + 0xc1, 0x61, 0x67, 0x5f, 0x3f, 0xe9, 0xe0, 0xee, 0xf1, 0x7e, 0x5f, 0xcd, 0x23, 0x04, 0xb5, 0x09, + 0x7b, 0xd0, 0x3d, 0xea, 0xa8, 0x05, 0x54, 0x85, 0xd5, 0x93, 0x0e, 0x6e, 0x77, 0x7a, 0x03, 0xb5, + 0xa8, 0xfd, 0x32, 0x0f, 0xd5, 0x84, 0x15, 0x99, 0x23, 0x07, 0x54, 0x1c, 0x59, 0x0a, 0x98, 0x7d, + 0xf2, 0x97, 0x51, 0xc3, 0x3c, 0x17, 0xd6, 0x29, 0x60, 0x41, 0xf0, 0x63, 0x8a, 0x71, 0x95, 0x58, + 0xe7, 0x05, 0x5c, 0x76, 0x8c, 0x2b, 0x01, 0xf2, 0x7d, 0x58, 0xbb, 0x20, 0x81, 0x4b, 0x46, 0xb2, + 0x5d, 0x58, 0xa4, 0x2a, 0x78, 0xa2, 0xcb, 0x2e, 0xa8, 0xb2, 0xcb, 0x04, 0x46, 0x98, 0xa3, 0x26, + 0xf8, 0x47, 0x11, 0xd8, 0x36, 0x14, 0x45, 0xf3, 0xaa, 0x98, 0x9f, 0x13, 0x6c, 0x9b, 0xa2, 0xaf, + 0x0c, 0x9f, 0xa7, 0x87, 0x05, 0xcc, 0xbf, 0xd1, 0xd9, 0xac, 0x7d, 0x4a, 0xdc, 0x3e, 0xf7, 0x17, + 0x77, 0xe7, 0xd7, 0x99, 0xe8, 0x3c, 0x36, 0xd1, 0x2a, 0xe4, 0x71, 0x54, 0x9e, 0xd3, 0x6e, 0xb5, + 0x0f, 0x98, 0x59, 0xd6, 0xa1, 0x72, 0xd4, 0xfa, 0x99, 0x7e, 0xda, 0xe7, 0x97, 0xed, 0x48, 0x85, + 0xb5, 0x27, 0x1d, 0xdc, 0xeb, 0x1c, 0x4a, 0x4e, 0x1e, 0x6d, 0x83, 0x2a, 0x39, 0x93, 0x7e, 0x05, + 0x86, 0x20, 0x3e, 0x8b, 0xa8, 0x0c, 0x85, 0xfe, 0xd3, 0xd6, 0x89, 0x5a, 0xd2, 0xfe, 0x27, 0x07, + 0x1b, 0x62, 0x5b, 0x88, 0x0b, 0x09, 0x5e, 0xff, 0x90, 0x9a, 0xbc, 0x7c, 0xca, 0xa5, 0x2f, 0x9f, + 0xa2, 0x24, 0x94, 0xef, 0xea, 0xf9, 0x49, 0x12, 0xca, 0x2f, 0x64, 0x52, 0x11, 0xbf, 0xb0, 0x48, + 0xc4, 0xaf, 0xc3, 0xaa, 0x43, 0x68, 0x6c, 0xb7, 0x0a, 0x8e, 0x48, 0x64, 0x43, 0xd5, 0x70, 0x5d, + 0x2f, 0x34, 0xc4, 0x8d, 0x6e, 0x69, 0xa1, 0xcd, 0x70, 0xea, 0x1f, 0x37, 0x5b, 0x13, 0x24, 0x11, + 0x98, 0x93, 0xd8, 0x8d, 0x9f, 0x82, 0x3a, 0xdd, 0x61, 0x91, 0xed, 0xf0, 0x07, 0x3f, 0x9a, 0xec, + 0x86, 0x84, 0xad, 0x0b, 0xf9, 0x14, 0xa2, 0xae, 0x30, 0x02, 0x9f, 0xf6, 0x7a, 0xdd, 0xde, 0x63, + 0x55, 0x41, 0x00, 0xa5, 0xce, 0xcf, 0xba, 0x83, 0xce, 0xbe, 0x9a, 0xdb, 0xfb, 0xd5, 0x26, 0x94, + 0x84, 0x90, 0xe8, 0x5b, 0x99, 0x09, 0x24, 0x8b, 0x54, 0xd1, 0x4f, 0x17, 0xce, 0xa8, 0x53, 0x85, + 0xaf, 0x8d, 0x87, 0x4b, 0x8f, 0x97, 0x8f, 0x82, 0x2b, 0xe8, 0x6f, 0x15, 0x58, 0x4b, 0x3d, 0x08, + 0x66, 0xbd, 0xd1, 0x9e, 0x53, 0x13, 0xdb, 0xf8, 0xc9, 0x52, 0x63, 0x63, 0x59, 0xbe, 0x51, 0xa0, + 0x9a, 0xa8, 0x06, 0x45, 0xf7, 0x97, 0xa9, 0x20, 0x15, 0x92, 0x7c, 0xba, 0x7c, 0xf1, 0xa9, 0xb6, + 0xf2, 0x91, 0x82, 0xfe, 0x46, 0x81, 0x6a, 0xa2, 0x2e, 0x32, 0xb3, 0x28, 0xb3, 0x55, 0x9c, 0x99, + 0x45, 0x99, 0x57, 0x86, 0xb9, 0x82, 0xfe, 0x4a, 0x81, 0x4a, 0x5c, 0xe3, 0x88, 0xee, 0x2d, 0x5e, + 0x15, 0x29, 0x84, 0xf8, 0x64, 0xd9, 0x72, 0x4a, 0x6d, 0x05, 0xfd, 0x05, 0x94, 0xa3, 0x82, 0x40, + 0x94, 0x75, 0xf7, 0x9a, 0xaa, 0x36, 0x6c, 0xdc, 0x5b, 0x78, 0x5c, 0x72, 0xfa, 0xa8, 0x4a, 0x2f, + 0xf3, 0xf4, 0x53, 0xf5, 0x84, 0x8d, 0x7b, 0x0b, 0x8f, 0x8b, 0xa7, 0x67, 0x9e, 0x90, 0x28, 0xe6, + 0xcb, 0xec, 0x09, 0xb3, 0x55, 0x84, 0x99, 0x3d, 0x61, 0x5e, 0xed, 0xa0, 0x10, 0x24, 0x51, 0x0e, + 0x98, 0x59, 0x90, 0xd9, 0x92, 0xc3, 0xcc, 0x82, 0xcc, 0xa9, 0x3e, 0xd4, 0x56, 0xd0, 0x2f, 0x94, + 0xe4, 0xb9, 0xe0, 0xde, 0xc2, 0x55, 0x6f, 0x0b, 0xba, 0xe4, 0x4c, 0xdd, 0x1d, 0x5f, 0xa0, 0xbf, + 0x90, 0xb7, 0x18, 0xa2, 0x68, 0x0e, 0x2d, 0x02, 0x96, 0xaa, 0xb3, 0x6b, 0x7c, 0xbc, 0xdc, 0x66, + 0xc3, 0x85, 0xf8, 0x6b, 
0x05, 0x60, 0x52, 0x5e, 0x97, 0x59, 0x88, 0x99, 0xba, 0xbe, 0xc6, 0xfd, + 0x25, 0x46, 0x26, 0x17, 0x48, 0x54, 0xfe, 0x93, 0x79, 0x81, 0x4c, 0x95, 0xff, 0x65, 0x5e, 0x20, + 0xd3, 0xa5, 0x7b, 0xda, 0x0a, 0xfa, 0x67, 0x05, 0x36, 0x67, 0xca, 0x8f, 0xd0, 0xc3, 0x1b, 0x56, + 0xa0, 0x35, 0x3e, 0x5f, 0x1e, 0x20, 0x12, 0x6d, 0x57, 0xf9, 0x48, 0x41, 0x7f, 0xa7, 0xc0, 0x7a, + 0xba, 0x2c, 0x23, 0xf3, 0x2e, 0x35, 0xa7, 0x90, 0xa9, 0xf1, 0x60, 0xb9, 0xc1, 0xb1, 0xb6, 0xfe, + 0x41, 0x81, 0x5a, 0xba, 0x42, 0x07, 0x3d, 0x58, 0x2c, 0x2c, 0x4c, 0x09, 0xf4, 0xd9, 0x92, 0xa3, + 0x23, 0x89, 0xbe, 0x58, 0xfd, 0xe3, 0xa2, 0xc8, 0xde, 0x4a, 0xfc, 0xe7, 0xc7, 0xbf, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x65, 0x60, 0xd8, 0xbc, 0x4b, 0x34, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto index 8511e61c..a7405b6b 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto @@ -476,6 +476,18 @@ message TaskConfig { // DNSConfig is the configuration for task DNS resolvers and other options DNSConfig dns = 17; + + // JobId is the ID of the job of which this task is part of + string job_id = 18; + + // Namespace is the namespace of the job of which this task is part of + string namespace = 19; + + // NodeName is the name of the node where the associated allocation is running + string node_name = 20; + + // NodeId is the ID of the node where the associated allocation is running + string node_id = 21; } message Resources { diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/dns_testing.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/dns_testing.go index ccd0f0e2..fef3a2d9 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/dns_testing.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/dns_testing.go @@ -4,17 +4,16 @@ import ( "strings" "testing" - dresolvconf "github.com/docker/libnetwork/resolvconf" - dtypes "github.com/docker/libnetwork/types" + "github.com/docker/docker/libnetwork/resolvconf" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) // TestTaskDNSConfig asserts that a task is running with the given DNSConfig func TestTaskDNSConfig(t *testing.T, driver *DriverHarness, taskID string, dns *drivers.DNSConfig) { t.Run("dns_config", func(t *testing.T) { caps, err := driver.Capabilities() - require.NoError(t, err) + must.NoError(t, err) // FS isolation is used here as a proxy for network isolation. // This is true for the current built-in drivers but it is not necessarily so. 
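Illustrative aside, not part of the diff: the driver.proto hunk above adds job_id (18), namespace (19), node_name (20) and node_id (21) to the TaskConfig message, and the plugins/drivers/utils.go hunk further down maps them onto the Go TaskConfig struct. A minimal sketch of how a task driver built against 1.4.11 might read them — the package name and the logTaskIdentity helper are hypothetical, not part of Nomad:

// Hypothetical sketch only; assumes Nomad >= 1.4.11, where
// drivers.TaskConfig carries JobID, Namespace, NodeName and NodeID.
package exampledriver

import (
	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/plugins/drivers"
)

// logTaskIdentity records where a task is being placed before the driver
// launches it, using the identity fields introduced by this change.
func logTaskIdentity(logger hclog.Logger, cfg *drivers.TaskConfig) {
	logger.Info("starting task",
		"task", cfg.Name,
		"job_id", cfg.JobID,
		"namespace", cfg.Namespace,
		"node_name", cfg.NodeName,
		"node_id", cfg.NodeID,
	)
}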
@@ -28,31 +27,31 @@ func TestTaskDNSConfig(t *testing.T, driver *DriverHarness, taskID string, dns * // write to a file and check it presence in host r := execTask(t, driver, taskID, `cat /etc/resolv.conf`, false, "") - require.Zero(t, r.exitCode) + must.Zero(t, r.exitCode) resolvConf := []byte(strings.TrimSpace(r.stdout)) if dns != nil { if len(dns.Servers) > 0 { - require.ElementsMatch(t, dns.Servers, dresolvconf.GetNameservers(resolvConf, dtypes.IP)) + must.SliceContainsAll(t, dns.Servers, resolvconf.GetNameservers(resolvConf, resolvconf.IP)) } if len(dns.Searches) > 0 { - require.ElementsMatch(t, dns.Searches, dresolvconf.GetSearchDomains(resolvConf)) + must.SliceContainsAll(t, dns.Searches, resolvconf.GetSearchDomains(resolvConf)) } if len(dns.Options) > 0 { - require.ElementsMatch(t, dns.Options, dresolvconf.GetOptions(resolvConf)) + must.SliceContainsAll(t, dns.Options, resolvconf.GetOptions(resolvConf)) } } else { systemPath := "/etc/resolv.conf" if !usesHostNetwork { - systemPath = dresolvconf.Path() + systemPath = resolvconf.Path() } - system, err := dresolvconf.GetSpecific(systemPath) - require.NoError(t, err) - require.ElementsMatch(t, dresolvconf.GetNameservers(system.Content, dtypes.IP), dresolvconf.GetNameservers(resolvConf, dtypes.IP)) - require.ElementsMatch(t, dresolvconf.GetSearchDomains(system.Content), dresolvconf.GetSearchDomains(resolvConf)) - require.ElementsMatch(t, dresolvconf.GetOptions(system.Content), dresolvconf.GetOptions(resolvConf)) + system, specificErr := resolvconf.GetSpecific(systemPath) + must.NoError(t, specificErr) + must.SliceContainsAll(t, resolvconf.GetNameservers(system.Content, resolvconf.IP), resolvconf.GetNameservers(resolvConf, resolvconf.IP)) + must.SliceContainsAll(t, resolvconf.GetSearchDomains(system.Content), resolvconf.GetSearchDomains(resolvConf)) + must.SliceContainsAll(t, resolvconf.GetOptions(system.Content), resolvconf.GetOptions(resolvConf)) } }) } diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go index 89ff1496..2bb87a64 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/testutils/testing.go @@ -6,14 +6,12 @@ import ( "os" "path/filepath" "runtime" - "strings" "time" hclog "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" - "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/lib/cgutil" "github.com/hashicorp/nomad/client/logmon" "github.com/hashicorp/nomad/client/taskenv" @@ -145,7 +143,7 @@ func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func( } taskBuilder := taskenv.NewBuilder(mock.Node(), alloc, task, "global") - SetEnvvars(taskBuilder, fsi, taskDir, config.DefaultConfig()) + SetEnvvars(taskBuilder, fsi, taskDir) taskEnv := taskBuilder.Build() if t.Env == nil { @@ -291,7 +289,7 @@ func (d *MockDriver) ExecTaskStreaming(ctx context.Context, taskID string, execO } // SetEnvvars sets path and host env vars depending on the FS isolation used. 
-func SetEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *allocdir.TaskDir, conf *config.Config) { +func SetEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *allocdir.TaskDir) { envBuilder.SetClientTaskRoot(taskDir.Dir) envBuilder.SetClientSharedAllocDir(taskDir.SharedAllocDir) @@ -314,11 +312,6 @@ func SetEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *a // Set the host environment variables for non-image based drivers if fsi != drivers.FSIsolationImage { - // COMPAT(1.0) using inclusive language, blacklist is kept for backward compatibility. - filter := strings.Split(conf.ReadAlternativeDefault( - []string{"env.denylist", "env.blacklist"}, - config.DefaultEnvDenylist, - ), ",") - envBuilder.SetHostEnvvars(filter) + envBuilder.SetHostEnvvars([]string{"env.denylist"}) } } diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go index b516e337..91ccc387 100644 --- a/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go @@ -51,16 +51,20 @@ func taskConfigFromProto(pb *proto.TaskConfig) *TaskConfig { return &TaskConfig{ ID: pb.Id, JobName: pb.JobName, + JobID: pb.JobId, TaskGroupName: pb.TaskGroupName, Name: pb.Name, + Namespace: pb.Namespace, + NodeName: pb.NodeName, + NodeID: pb.NodeId, Env: pb.Env, DeviceEnv: pb.DeviceEnv, - rawDriverConfig: pb.MsgpackDriverConfig, Resources: ResourcesFromProto(pb.Resources), Devices: DevicesFromProto(pb.Devices), Mounts: MountsFromProto(pb.Mounts), User: pb.User, AllocDir: pb.AllocDir, + rawDriverConfig: pb.MsgpackDriverConfig, StdoutPath: pb.StdoutPath, StderrPath: pb.StderrPath, AllocID: pb.AllocId, @@ -76,8 +80,12 @@ func taskConfigToProto(cfg *TaskConfig) *proto.TaskConfig { pb := &proto.TaskConfig{ Id: cfg.ID, JobName: cfg.JobName, + JobId: cfg.JobID, TaskGroupName: cfg.TaskGroupName, Name: cfg.Name, + Namespace: cfg.Namespace, + NodeName: cfg.NodeName, + NodeId: cfg.NodeID, Env: cfg.Env, DeviceEnv: cfg.DeviceEnv, Resources: ResourcesToProto(cfg.Resources), diff --git a/vendor/github.com/hashicorp/nomad/testutil/container_images.go b/vendor/github.com/hashicorp/nomad/testutil/container_images.go new file mode 100644 index 00000000..e247d5e9 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/testutil/container_images.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package testutil + +import ( + "os" + "runtime" +) + +const ( + // BusyboxImageEnvName is always used, if set, by TestBusyboxImage() + BusyboxImageEnvName = "BUSYBOX_IMAGE" +) + +func TestDockerImage(name, tag string) string { + img := name + ":" + tag + if IsCI() { + // use our mirror to avoid rate-limiting in CI + img = "docker.mirror.hashicorp.services/" + img + } else { + // explicitly include docker.io for podman + img = "docker.io/" + img + } + return img +} + +func TestBusyboxImage() string { + // if env is set, use it verbatim + if img, ok := os.LookupEnv(BusyboxImageEnvName); ok { + return img + } + + // otherwise, figure it out + name := "busybox" + tag := "1" + if runtime.GOOS == "windows" { + // this image is maintained in https://github.com/hashicorp/busybox-windows + name = "hashicorpdev/busybox-windows" + tag = "server2016-0.1" + } + return TestDockerImage(name, tag) +} diff --git a/vendor/github.com/hashicorp/nomad/testutil/wait.go b/vendor/github.com/hashicorp/nomad/testutil/wait.go index 006d2e21..4c7b2eae 100644 --- a/vendor/github.com/hashicorp/nomad/testutil/wait.go +++ b/vendor/github.com/hashicorp/nomad/testutil/wait.go @@ -7,10 +7,12 @@ import ( "time" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/nomad/nomad/structs" "github.com/kr/pretty" "github.com/shoenig/test/must" + "github.com/shoenig/test/wait" "github.com/stretchr/testify/require" + + "github.com/hashicorp/nomad/nomad/structs" ) type testFn func() (bool, error) @@ -135,6 +137,25 @@ func WaitForLeader(t testing.TB, rpc rpcFn) { }) } +// WaitForLeaders blocks until each serverRPC knows the leader. +func WaitForLeaders(t testing.TB, serverRPCs ...rpcFn) { + t.Helper() + + for i := 0; i < len(serverRPCs); i++ { + ok := func() (bool, error) { + args := &structs.GenericRequest{} + var leader string + err := serverRPCs[i]("Status.Leader", args, &leader) + return leader != "", err + } + must.Wait(t, wait.InitialSuccess( + wait.TestFunc(ok), + wait.Timeout(10*time.Second), + wait.Gap(1*time.Second), + )) + } +} + // WaitForClient blocks until the client can be found func WaitForClient(t testing.TB, rpc rpcFn, nodeID string, region string) { t.Helper() @@ -208,6 +229,7 @@ func WaitForVotingMembers(t testing.TB, rpc rpcFn, nPeers int) { // RegisterJobWithToken registers a job and uses the job's Region and Namespace. func RegisterJobWithToken(t testing.TB, rpc rpcFn, job *structs.Job, token string) { + t.Helper() WaitForResult(func() (bool, error) { args := &structs.JobRegisterRequest{} args.Job = job diff --git a/vendor/github.com/hashicorp/nomad/version/version.go b/vendor/github.com/hashicorp/nomad/version/version.go deleted file mode 100644 index ebe59466..00000000 --- a/vendor/github.com/hashicorp/nomad/version/version.go +++ /dev/null @@ -1,92 +0,0 @@ -package version - -import ( - "bytes" - "fmt" -) - -var ( - // The git commit that was compiled. This will be filled in by the compiler. - GitCommit string - GitDescribe string - - // The main version number that is being run at the moment. - Version = "1.4.6" - - // A pre-release marker for the version. If this is "" (empty string) - // then it means that it is a final release. Otherwise, this is a pre-release - // such as "dev" (in development), "beta", "rc1", etc. - VersionPrerelease = "" - - // VersionMetadata is metadata further describing the build type. 
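Illustrative aside, not part of the diff: the new testutil/container_images.go above centralizes test image selection (BUSYBOX_IMAGE override, the HashiCorp mirror in CI, docker.io otherwise), and wait.go gains WaitForLeaders, which polls each server RPC until Status.Leader reports a leader. A hypothetical driver test using the image helper might look like this — the test name and task config keys are assumptions for illustration, not Nomad API:

// Hypothetical usage sketch only; TestMyDriver_Start and the task config
// keys are made up for illustration.
package exampledriver_test

import (
	"testing"

	"github.com/hashicorp/nomad/testutil"
)

func TestMyDriver_Start(t *testing.T) {
	// Honors BUSYBOX_IMAGE when set; otherwise resolves to the CI mirror,
	// docker.io/busybox:1, or the Windows busybox image as appropriate.
	image := testutil.TestBusyboxImage()

	taskConfig := map[string]interface{}{
		"image":   image,
		"command": "sleep",
		"args":    []string{"30"},
	}

	_ = taskConfig // hand this to the driver harness in a real test
}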
- VersionMetadata = "" -) - -// VersionInfo -type VersionInfo struct { - Revision string - Version string - VersionPrerelease string - VersionMetadata string -} - -func (v *VersionInfo) Copy() *VersionInfo { - if v == nil { - return nil - } - - nv := *v - return &nv -} - -func GetVersion() *VersionInfo { - ver := Version - rel := VersionPrerelease - md := VersionMetadata - if GitDescribe != "" { - ver = GitDescribe - } - if GitDescribe == "" && rel == "" && VersionPrerelease != "" { - rel = "dev" - } - - return &VersionInfo{ - Revision: GitCommit, - Version: ver, - VersionPrerelease: rel, - VersionMetadata: md, - } -} - -func (c *VersionInfo) VersionNumber() string { - version := c.Version - - if c.VersionPrerelease != "" { - version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) - } - - if c.VersionMetadata != "" { - version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) - } - - return version -} - -func (c *VersionInfo) FullVersionNumber(rev bool) string { - var versionString bytes.Buffer - - fmt.Fprintf(&versionString, "Nomad v%s", c.Version) - if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) - } - - if c.VersionMetadata != "" { - fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) - } - - if rev && c.Revision != "" { - fmt.Fprintf(&versionString, " (%s)", c.Revision) - } - - return versionString.String() -} diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go index fa92de4b..c1ef7a77 100644 --- a/vendor/github.com/hashicorp/vault/api/auth.go +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -63,7 +66,7 @@ func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[s return nil, fmt.Errorf("secret does not contain MFARequirements") } - s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.GetMFARequestID(), payload) + s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.MFARequestID, payload) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/auth/kubernetes/LICENSE b/vendor/github.com/hashicorp/vault/api/auth/kubernetes/LICENSE deleted file mode 100644 index e87a115e..00000000 --- a/vendor/github.com/hashicorp/vault/api/auth/kubernetes/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. 
"Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. 
under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. 
Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. 
This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. 
- -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/vault/api/auth/kubernetes/kubernetes.go b/vendor/github.com/hashicorp/vault/api/auth/kubernetes/kubernetes.go deleted file mode 100644 index c2fef86a..00000000 --- a/vendor/github.com/hashicorp/vault/api/auth/kubernetes/kubernetes.go +++ /dev/null @@ -1,133 +0,0 @@ -package kubernetes - -import ( - "context" - "fmt" - "os" - - "github.com/hashicorp/vault/api" -) - -type KubernetesAuth struct { - roleName string - mountPath string - serviceAccountToken string -} - -var _ api.AuthMethod = (*KubernetesAuth)(nil) - -type LoginOption func(a *KubernetesAuth) error - -const ( - defaultMountPath = "kubernetes" - defaultServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" -) - -// NewKubernetesAuth creates a KubernetesAuth struct which can be passed to -// the client.Auth().Login method to authenticate to Vault. The roleName -// parameter should be the name of the role in Vault that was created with -// this app's Kubernetes service account bound to it. -// -// The Kubernetes service account token JWT is retrieved from -// /var/run/secrets/kubernetes.io/serviceaccount/token by default. To change this -// path, pass the WithServiceAccountTokenPath option. To instead pass the -// JWT directly as a string, or to read the value from an environment -// variable, use WithServiceAccountToken and WithServiceAccountTokenEnv respectively. 
-// -// Supported options: WithMountPath, WithServiceAccountTokenPath, WithServiceAccountTokenEnv, WithServiceAccountToken -func NewKubernetesAuth(roleName string, opts ...LoginOption) (*KubernetesAuth, error) { - if roleName == "" { - return nil, fmt.Errorf("no role name was provided") - } - - a := &KubernetesAuth{ - roleName: roleName, - mountPath: defaultMountPath, - } - - // Loop through each option - for _, opt := range opts { - // Call the option giving the instantiated - // *KubernetesAuth as the argument - err := opt(a) - if err != nil { - return nil, fmt.Errorf("error with login option: %w", err) - } - } - - if a.serviceAccountToken == "" { - token, err := readTokenFromFile(defaultServiceAccountTokenPath) - if err != nil { - return nil, fmt.Errorf("error reading service account token from default location: %w", err) - } - a.serviceAccountToken = token - } - - // return the modified auth struct instance - return a, nil -} - -func (a *KubernetesAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) { - if ctx == nil { - ctx = context.Background() - } - - loginData := map[string]interface{}{ - "jwt": a.serviceAccountToken, - "role": a.roleName, - } - - path := fmt.Sprintf("auth/%s/login", a.mountPath) - resp, err := client.Logical().WriteWithContext(ctx, path, loginData) - if err != nil { - return nil, fmt.Errorf("unable to log in with Kubernetes auth: %w", err) - } - return resp, nil -} - -func WithMountPath(mountPath string) LoginOption { - return func(a *KubernetesAuth) error { - a.mountPath = mountPath - return nil - } -} - -// WithServiceAccountTokenPath allows you to specify a different path to -// where your application's Kubernetes service account token is mounted, -// instead of the default of /var/run/secrets/kubernetes.io/serviceaccount/token -func WithServiceAccountTokenPath(pathToToken string) LoginOption { - return func(a *KubernetesAuth) error { - token, err := readTokenFromFile(pathToToken) - if err != nil { - return fmt.Errorf("unable to read service account token from file: %w", err) - } - a.serviceAccountToken = token - return nil - } -} - -func WithServiceAccountToken(jwt string) LoginOption { - return func(a *KubernetesAuth) error { - a.serviceAccountToken = jwt - return nil - } -} - -func WithServiceAccountTokenEnv(envVar string) LoginOption { - return func(a *KubernetesAuth) error { - token := os.Getenv(envVar) - if token == "" { - return fmt.Errorf("service account token was specified with an environment variable with an empty value") - } - a.serviceAccountToken = token - return nil - } -} - -func readTokenFromFile(filepath string) (string, error) { - jwt, err := os.ReadFile(filepath) - if err != nil { - return "", fmt.Errorf("unable to read file containing service account token: %w", err) - } - return string(jwt), nil -} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go index 52be1e78..1980be06 100644 --- a/vendor/github.com/hashicorp/vault/api/auth_token.go +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index c6843348..c5ddee1c 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -24,12 +27,9 @@ import ( "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" "golang.org/x/net/http2" "golang.org/x/time/rate" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/hashicorp/vault/sdk/logical" ) const ( @@ -56,7 +56,19 @@ const ( HeaderIndex = "X-Vault-Index" HeaderForward = "X-Vault-Forward" HeaderInconsistent = "X-Vault-Inconsistent" - TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + + + // NamespaceHeaderName is the header set to specify which namespace the + // request is indented for. + NamespaceHeaderName = "X-Vault-Namespace" + + // AuthHeaderName is the name of the header containing the token. + AuthHeaderName = "X-Vault-Token" + + // RequestHeaderName is the name of the header used by the Agent for + // SSRF protection. + RequestHeaderName = "X-Vault-Request" + + TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + "but the client is configured to use TLS. Please either enable TLS\n" + "on the server or run the client with -address set to an address\n" + "that uses the http protocol:\n\n" + @@ -114,7 +126,11 @@ type Config struct { // of three tries). MaxRetries int - // Timeout is for setting custom timeout parameter in the HttpClient + // Timeout, given a non-negative value, will apply the request timeout + // to each request function unless an earlier deadline is passed to the + // request function through context.Context. Note that this timeout is + // not applicable to Logical().ReadRaw* (raw response) functions. + // Defaults to 60 seconds. Timeout time.Duration // If there is an error when creating the configuration, this will be the @@ -187,6 +203,7 @@ type Config struct { // commands such as 'vault operator raft snapshot' as this redirects to the // primary node. DisableRedirects bool + clientTLSConfig *tls.Config } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -321,10 +338,17 @@ func (c *Config) configureTLS(t *TLSConfig) error { if t.TLSServerName != "" { clientTLSConfig.ServerName = t.TLSServerName } + c.clientTLSConfig = clientTLSConfig return nil } +func (c *Config) TLSConfig() *tls.Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.clientTLSConfig.Clone() +} + // ConfigureTLS takes a set of TLS configurations and applies those to the // HTTP client. 
func (c *Config) ConfigureTLS(t *TLSConfig) error { @@ -617,7 +641,7 @@ func NewClient(c *Config) (*Client, error) { } // Add the VaultRequest SSRF protection header - client.headers[consts.RequestHeaderName] = []string{"true"} + client.headers[RequestHeaderName] = []string{"true"} if token := os.Getenv(EnvVaultToken); token != "" { client.token = token @@ -649,6 +673,7 @@ func (c *Client) CloneConfig() *Config { newConfig.CloneHeaders = c.config.CloneHeaders newConfig.CloneToken = c.config.CloneToken newConfig.ReadYourWrites = c.config.ReadYourWrites + newConfig.clientTLSConfig = c.config.clientTLSConfig // we specifically want a _copy_ of the client here, not a pointer to the original one newClient := *c.config.HttpClient @@ -934,7 +959,7 @@ func (c *Client) setNamespace(namespace string) { c.headers = make(http.Header) } - c.headers.Set(consts.NamespaceHeaderName, namespace) + c.headers.Set(NamespaceHeaderName, namespace) } // ClearNamespace removes the namespace header if set. @@ -942,7 +967,7 @@ func (c *Client) ClearNamespace() { c.modifyLock.Lock() defer c.modifyLock.Unlock() if c.headers != nil { - c.headers.Del(consts.NamespaceHeaderName) + c.headers.Del(NamespaceHeaderName) } } @@ -954,7 +979,7 @@ func (c *Client) Namespace() string { if c.headers == nil { return "" } - return c.headers.Get(consts.NamespaceHeaderName) + return c.headers.Get(NamespaceHeaderName) } // WithNamespace makes a shallow copy of Client, modifies it to use @@ -1288,7 +1313,7 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon checkRetry := c.config.CheckRetry backoff := c.config.Backoff httpClient := c.config.HttpClient - ns := c.headers.Get(consts.NamespaceHeaderName) + ns := c.headers.Get(NamespaceHeaderName) outputCurlString := c.config.OutputCurlString outputPolicy := c.config.OutputPolicy logger := c.config.Logger @@ -1301,9 +1326,9 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon // e.g. 
calls using (*Client).WithNamespace switch ns { case "": - r.Headers.Del(consts.NamespaceHeaderName) + r.Headers.Del(NamespaceHeaderName) default: - r.Headers.Set(consts.NamespaceHeaderName, ns) + r.Headers.Set(NamespaceHeaderName, ns) } for _, cb := range c.requestCallbacks { @@ -1349,6 +1374,7 @@ START: LastOutputPolicyError = &OutputPolicyError{ method: req.Method, path: strings.TrimPrefix(req.URL.Path, "/v1"), + params: req.URL.Query(), } return nil, LastOutputPolicyError } @@ -1456,8 +1482,8 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo } } // explicitly set the namespace header to current client - if ns := c.headers.Get(consts.NamespaceHeaderName); ns != "" { - r.Headers.Set(consts.NamespaceHeaderName, ns) + if ns := c.headers.Get(NamespaceHeaderName); ns != "" { + r.Headers.Set(NamespaceHeaderName, ns) } } @@ -1478,7 +1504,7 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo req.Host = r.URL.Host if len(r.ClientToken) != 0 { - req.Header.Set(consts.AuthHeaderName, r.ClientToken) + req.Header.Set(AuthHeaderName, r.ClientToken) } if len(r.WrapTTL) != 0 { @@ -1668,7 +1694,13 @@ func MergeReplicationStates(old []string, new string) []string { return strutil.RemoveDuplicates(ret, false) } -func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) { +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +func ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) { cooked, err := base64.StdEncoding.DecodeString(raw) if err != nil { return nil, err @@ -1706,7 +1738,7 @@ func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error return nil, fmt.Errorf("invalid replicated index in state header: %w", err) } - return &logical.WALState{ + return &WALState{ ClusterID: pieces[1], LocalIndex: localIndex, ReplicatedIndex: replicatedIndex, diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go index 0988ebcd..c119f6c3 100644 --- a/vendor/github.com/hashicorp/vault/api/help.go +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/kv.go b/vendor/github.com/hashicorp/vault/api/kv.go index 37699df2..20862fbf 100644 --- a/vendor/github.com/hashicorp/vault/api/kv.go +++ b/vendor/github.com/hashicorp/vault/api/kv.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "errors" diff --git a/vendor/github.com/hashicorp/vault/api/kv_v1.go b/vendor/github.com/hashicorp/vault/api/kv_v1.go index 22ba9923..a914e035 100644 --- a/vendor/github.com/hashicorp/vault/api/kv_v1.go +++ b/vendor/github.com/hashicorp/vault/api/kv_v1.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/kv_v2.go b/vendor/github.com/hashicorp/vault/api/kv_v2.go index 335c2100..72c29eaa 100644 --- a/vendor/github.com/hashicorp/vault/api/kv_v2.go +++ b/vendor/github.com/hashicorp/vault/api/kv_v2.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go index 5f3eadbf..5c060e5a 100644 --- a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -147,6 +150,13 @@ func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, random := i.Rand if random == nil { + // NOTE: + // Rather than a cryptographically secure random number generator (RNG), + // the default behavior uses the math/rand package. The random number is + // used to introduce a slight jitter when calculating the grace period + // for a monitored secret monitoring. This is intended to stagger renewal + // requests to the Vault server, but in a semi-predictable way, so there + // is no need to use a cryptographically secure RNG. random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) } @@ -337,25 +347,15 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, var sleepDuration time.Duration - if errorBackoff != nil { - sleepDuration = errorBackoff.NextBackOff() - if sleepDuration == backoff.Stop { - return err - } - } else { - // We keep evaluating a new grace period so long as the lease is - // extending. Once it stops extending, we've hit the max and need to - // rely on the grace duration. - if remainingLeaseDuration > priorDuration { - r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) - } - priorDuration = remainingLeaseDuration - - // The sleep duration is set to 2/3 of the current lease duration plus - // 1/3 of the current grace period, which adds jitter. - sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + if errorBackoff == nil { + sleepDuration = r.calculateSleepDuration(remainingLeaseDuration, priorDuration) + } else if errorBackoff.NextBackOff() == backoff.Stop { + return err } + // remainingLeaseDuration becomes the priorDuration for the next loop + priorDuration = remainingLeaseDuration + // If we are within grace, return now; or, if the amount of time we // would sleep would land us in the grace period. This helps with short // tokens; for example, you don't want a current lease duration of 4 @@ -366,15 +366,32 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, return nil } + timer := time.NewTimer(sleepDuration) select { case <-r.stopCh: + timer.Stop() return nil - case <-time.After(sleepDuration): + case <-timer.C: continue } } } +// calculateSleepDuration calculates the amount of time the LifeTimeWatcher should sleep +// before re-entering its loop. +func (r *LifetimeWatcher) calculateSleepDuration(remainingLeaseDuration, priorDuration time.Duration) time.Duration { + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if remainingLeaseDuration > priorDuration { + r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) + } + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. 
+ return time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) +} + // calculateGrace calculates the grace period based on the minimum of the // remaining lease duration and the token increment value; it also adds some // jitter to not have clients be in sync. diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go index d2e5bb5e..927dd168 100644 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -11,7 +15,6 @@ import ( "strings" "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/sdk/helper/jsonutil" ) const ( @@ -66,6 +69,53 @@ func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data defer cancelFunc() resp, err := c.readRawWithDataWithContext(ctx, path, data) + return c.ParseRawResponseAndCloseBody(resp, err) +} + +// ReadRaw attempts to read the value stored at the given Vault path +// (without '/v1/' prefix) and returns a raw *http.Response. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please use ReadRawWithContext +// instead and set the timeout through context.WithTimeout or context.WithDeadline. +func (c *Logical) ReadRaw(path string) (*Response, error) { + return c.ReadRawWithDataWithContext(context.Background(), path, nil) +} + +// ReadRawWithContext attempts to read the value stored at the give Vault path +// (without '/v1/' prefix) and returns a raw *http.Response. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please set it through +// context.WithTimeout or context.WithDeadline. +func (c *Logical) ReadRawWithContext(ctx context.Context, path string) (*Response, error) { + return c.ReadRawWithDataWithContext(ctx, path, nil) +} + +// ReadRawWithData attempts to read the value stored at the given Vault +// path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' map +// is added as query parameters to the request. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please use +// ReadRawWithDataWithContext instead and set the timeout through +// context.WithTimeout or context.WithDeadline. +func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { + return c.ReadRawWithDataWithContext(context.Background(), path, data) +} + +// ReadRawWithDataWithContext attempts to read the value stored at the given +// Vault path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' +// map is added as query parameters to the request. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please set it through +// context.WithTimeout or context.WithDeadline. 
+func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + return c.readRawWithDataWithContext(ctx, path, data) +} + +func (c *Logical) ParseRawResponseAndCloseBody(resp *Response, err error) (*Secret, error) { if resp != nil { defer resp.Body.Close() } @@ -90,21 +140,6 @@ func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data return ParseSecret(resp.Body) } -func (c *Logical) ReadRaw(path string) (*Response, error) { - return c.ReadRawWithData(path, nil) -} - -func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { - return c.ReadRawWithDataWithContext(context.Background(), path, data) -} - -func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - return c.readRawWithDataWithContext(ctx, path, data) -} - func (c *Logical) readRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { r := c.c.NewRequest(http.MethodGet, "/v1/"+path) @@ -358,7 +393,9 @@ func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) ( wrappedSecret := new(Secret) buf := bytes.NewBufferString(secret.Data["response"].(string)) - if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { + dec := json.NewDecoder(buf) + dec.UseNumber() + if err := dec.Decode(wrappedSecret); err != nil { return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) } diff --git a/vendor/github.com/hashicorp/vault/api/output_policy.go b/vendor/github.com/hashicorp/vault/api/output_policy.go index 85d1617e..c3ec5228 100644 --- a/vendor/github.com/hashicorp/vault/api/output_policy.go +++ b/vendor/github.com/hashicorp/vault/api/output_policy.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( "fmt" "net/http" "net/url" + "strconv" "strings" ) @@ -16,6 +20,7 @@ var LastOutputPolicyError *OutputPolicyError type OutputPolicyError struct { method string path string + params url.Values finalHCLString string } @@ -44,8 +49,22 @@ func (d *OutputPolicyError) HCLString() (string, error) { // Builds a sample policy document from the request func (d *OutputPolicyError) buildSamplePolicy() (string, error) { + operation := d.method + // List is often defined as a URL param instead of as an http.Method + // this will check for the header and properly switch off of the intended functionality + if d.params.Has("list") { + isList, err := strconv.ParseBool(d.params.Get("list")) + if err != nil { + return "", fmt.Errorf("the value of the list url param is not a bool: %v", err) + } + + if isList { + operation = "LIST" + } + } + var capabilities []string - switch d.method { + switch operation { case http.MethodGet, "": capabilities = append(capabilities, "read") case http.MethodPost, http.MethodPut: @@ -59,17 +78,15 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) { capabilities = append(capabilities, "list") } - // sanitize, then trim the Vault address and v1 from the front of the path - path, err := url.PathUnescape(d.path) - if err != nil { - return "", fmt.Errorf("failed to unescape request URL characters: %v", err) - } - // determine whether to add sudo capability - if IsSudoPath(path) { + if IsSudoPath(d.path) { capabilities = append(capabilities, "sudo") } + return formatOutputPolicy(d.path, capabilities), nil +} + +func formatOutputPolicy(path string, capabilities []string) string { // the OpenAPI response has a / in front of each path, // but policies need the path without that leading slash path = strings.TrimLeft(path, "/") @@ -78,5 +95,5 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) { return fmt.Sprintf( `path "%s" { capabilities = ["%s"] -}`, path, capStr), nil +}`, path, capStr) } diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go index 80c591f2..d7777712 100644 --- a/vendor/github.com/hashicorp/vault/api/output_string.go +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go index 2b1b35c3..2d6416d7 100644 --- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -37,7 +40,7 @@ const ( // path matches that path or not (useful specifically for the paths that // contain templated fields.) 
var sudoPaths = map[string]*regexp.Regexp{ - "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), + "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/?$`), "/pki/root": regexp.MustCompile(`^/pki/root$`), "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), "/sys/audit": regexp.MustCompile(`^/sys/audit$`), @@ -47,10 +50,10 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), - "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), + "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/?$`), "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), "/sys/leases": regexp.MustCompile(`^/sys/leases$`), - "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/?$`), "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), @@ -63,13 +66,14 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), // enterprise-only paths "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), - "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`), "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go b/vendor/github.com/hashicorp/vault/api/plugin_types.go similarity index 84% rename from vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go rename to vendor/github.com/hashicorp/vault/api/plugin_types.go index e0a00e48..4c759a2d 100644 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go +++ b/vendor/github.com/hashicorp/vault/api/plugin_types.go @@ -1,4 +1,11 @@ -package consts +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go +// Any changes made should be made to both files at the same time. 
import "fmt" diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go index 1cbbc62f..ecf78370 100644 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -8,8 +11,6 @@ import ( "net/http" "net/url" - "github.com/hashicorp/vault/sdk/helper/consts" - retryablehttp "github.com/hashicorp/go-retryablehttp" ) @@ -127,7 +128,7 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { } if len(r.ClientToken) != 0 { - req.Header.Set(consts.AuthHeaderName, r.ClientToken) + req.Header.Set(AuthHeaderName, r.ClientToken) } if len(r.WrapTTL) != 0 { diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go index 9ce3d12a..2842c125 100644 --- a/vendor/github.com/hashicorp/vault/api/response.go +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -1,14 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "bytes" + "encoding/json" "fmt" "io" "io/ioutil" "net/http" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/jsonutil" ) // Response is a raw response that wraps an HTTP response. @@ -20,7 +21,9 @@ type Response struct { // will consume the response body, but will not close it. Close must // still be called. func (r *Response) DecodeJSON(out interface{}) error { - return jsonutil.DecodeJSONFromReader(r.Body, out) + dec := json.NewDecoder(r.Body) + dec.UseNumber() + return dec.Decode(out) } // Error returns an error response if there is one. If there is an error, @@ -42,7 +45,7 @@ func (r *Response) Error() error { r.Body.Close() r.Body = ioutil.NopCloser(bodyBuf) - ns := r.Header.Get(consts.NamespaceHeaderName) + ns := r.Header.Get(NamespaceHeaderName) // Build up the error object respErr := &ResponseError{ @@ -56,7 +59,9 @@ func (r *Response) Error() error { // in a bytes.Reader here so that the JSON decoder doesn't move the // read pointer for the original buffer. var resp ErrorResponse - if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { + dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) + dec.UseNumber() + if err := dec.Decode(&resp); err != nil { // Store the fact that we couldn't decode the errors respErr.RawError = true respErr.Errors = []string{bodyBuf.String()} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index 77e3ee9a..be159d70 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -1,15 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "bytes" + "encoding/json" "fmt" "io" + "reflect" + "strings" "time" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/logical" ) // Secret is the structure returned for every secret within Vault. 
@@ -280,6 +284,22 @@ type SecretWrapInfo struct { WrappedAccessor string `json:"wrapped_accessor"` } +type MFAMethodID struct { + Type string `json:"type,omitempty"` + ID string `json:"id,omitempty"` + UsesPasscode bool `json:"uses_passcode,omitempty"` + Name string `json:"name,omitempty"` +} + +type MFAConstraintAny struct { + Any []*MFAMethodID `json:"any,omitempty"` +} + +type MFARequirement struct { + MFARequestID string `json:"mfa_request_id,omitempty"` + MFAConstraints map[string]*MFAConstraintAny `json:"mfa_constraints,omitempty"` +} + // SecretAuth is the structure containing auth information if we have it. type SecretAuth struct { ClientToken string `json:"client_token"` @@ -294,7 +314,7 @@ type SecretAuth struct { LeaseDuration int `json:"lease_duration"` Renewable bool `json:"renewable"` - MFARequirement *logical.MFARequirement `json:"mfa_requirement"` + MFARequirement *MFARequirement `json:"mfa_requirement"` } // ParseSecret is used to parse a secret value from JSON from an io.Reader. @@ -302,7 +322,15 @@ func ParseSecret(r io.Reader) (*Secret, error) { // First read the data into a buffer. Not super efficient but we want to // know if we actually have a body or not. var buf bytes.Buffer - _, err := buf.ReadFrom(r) + + // io.Reader is treated like a stream and cannot be read + // multiple times. Duplicating this stream using TeeReader + // to use this data in case there is no top-level data from + // api response + var teebuf bytes.Buffer + tee := io.TeeReader(r, &teebuf) + + _, err := buf.ReadFrom(tee) if err != nil { return nil, err } @@ -312,9 +340,46 @@ func ParseSecret(r io.Reader) (*Secret, error) { // First decode the JSON into a map[string]interface{} var secret Secret - if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil { + dec := json.NewDecoder(&buf) + dec.UseNumber() + if err := dec.Decode(&secret); err != nil { return nil, err } + // If the secret is null, add raw data to secret data if present + if reflect.DeepEqual(secret, Secret{}) { + data := make(map[string]interface{}) + dec := json.NewDecoder(&teebuf) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return nil, err + } + errRaw, errPresent := data["errors"] + + // if only errors are present in the resp.Body return nil + // to return value not found as it does not have any raw data + if len(data) == 1 && errPresent { + return nil, nil + } + + // if errors are present along with raw data return the error + if errPresent { + var errStrArray []string + errBytes, err := json.Marshal(errRaw) + if err != nil { + return nil, err + } + if err := json.Unmarshal(errBytes, &errStrArray); err != nil { + return nil, err + } + return nil, fmt.Errorf(strings.Join(errStrArray, " ")) + } + + // if any raw data is present in resp.Body, add it to secret + if len(data) > 0 { + secret.Data = data + } + } + return &secret, nil } diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go index b832e274..28510eec 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh.go +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go index 03fe2bea..e6150377 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh_agent.go +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -15,7 +18,6 @@ import ( rootcerts "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/vault/sdk/helper/hclutil" "github.com/mitchellh/mapstructure" ) @@ -169,7 +171,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { "tls_skip_verify", "tls_server_name", } - if err := hclutil.CheckHCLKeys(list, valid); err != nil { + if err := CheckHCLKeys(list, valid); err != nil { return nil, multierror.Prefix(err, "ssh_helper:") } @@ -185,6 +187,33 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { return &c, nil } +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} + // SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend // mounted at default path ("ssh"). func (c *Client) SSHHelper() *SSHHelper { diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go index 5fb11188..81ebb3a2 100644 --- a/vendor/github.com/hashicorp/vault/api/sys.go +++ b/vendor/github.com/hashicorp/vault/api/sys.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Sys is used to perform system-related operations on Vault. diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go index 82d9aab0..2244087a 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_audit.go +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go index 238bd5e4..e8144121 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_auth.go +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go index af306a07..6310d42f 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go index 1e2cda4f..e80aa9d8 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go index 096cadb7..da4ad2f9 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go index d89d5965..2b2aa7c3 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go +++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go index 953c1c21..13fd8d4d 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_health.go +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go index 05dea86f..13fa9480 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_init.go +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go index a74e206e..868914d3 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leader.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go index c02402f5..c46f07e6 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leases.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_mfa.go b/vendor/github.com/hashicorp/vault/api/sys_mfa.go index a1ba1bd8..2be66958 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mfa.go +++ b/vendor/github.com/hashicorp/vault/api/sys_mfa.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_monitor.go b/vendor/github.com/hashicorp/vault/api/sys_monitor.go index 6813799f..15a8a13d 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_monitor.go +++ b/vendor/github.com/hashicorp/vault/api/sys_monitor.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -5,8 +8,6 @@ import ( "context" "fmt" "net/http" - - "github.com/hashicorp/vault/sdk/helper/logging" ) // Monitor returns a channel that outputs strings containing the log messages @@ -20,7 +21,7 @@ func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (c r.Params.Add("log_level", logLevel) } - if logFormat == "" || logFormat == logging.UnspecifiedFormat.String() { + if logFormat == "" { r.Params.Add("log_format", "standard") } else { r.Params.Add("log_format", logFormat) diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go index 75173b4d..a6c2a0f5 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -254,20 +257,20 @@ type MountInput struct { } type MountConfigInput struct { - Options map[string]string `json:"options" mapstructure:"options"` - DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` - Description *string `json:"description,omitempty" mapstructure:"description"` - MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` - AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` - TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` - AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` - PluginVersion string `json:"plugin_version,omitempty"` - + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + PluginVersion string `json:"plugin_version,omitempty"` + UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` // Deprecated: 
This field will always be blank for newer server responses. PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } @@ -289,21 +292,35 @@ type MountOutput struct { } type MountConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` - AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` - TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` - AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` - + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` // Deprecated: This field will always be blank for newer server responses. 
PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } +type UserLockoutConfigInput struct { + LockoutThreshold string `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` + LockoutDuration string `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` + LockoutCounterResetDuration string `json:"lockout_counter_reset_duration,omitempty" structs:"lockout_counter_reset_duration" mapstructure:"lockout_counter_reset_duration"` + DisableLockout *bool `json:"lockout_disable,omitempty" structs:"lockout_disable" mapstructure:"lockout_disable"` +} + +type UserLockoutConfigOutput struct { + LockoutThreshold uint `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` + LockoutDuration int `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` + LockoutCounterReset int `json:"lockout_counter_reset,omitempty" structs:"lockout_counter_reset" mapstructure:"lockout_counter_reset"` + DisableLockout *bool `json:"disable_lockout,omitempty" structs:"disable_lockout" mapstructure:"disable_lockout"` +} + type MountMigrationOutput struct { MigrationID string `mapstructure:"migration_id"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go index 989c78f1..2ee024d9 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_plugins.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -7,20 +10,19 @@ import ( "net/http" "time" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/mapstructure" ) // ListPluginsInput is used as input to the ListPlugins function. type ListPluginsInput struct { // Type of the plugin. Required. - Type consts.PluginType `json:"type"` + Type PluginType `json:"type"` } // ListPluginsResponse is the response from the ListPlugins call. type ListPluginsResponse struct { // PluginsByType is the list of plugins by type. - PluginsByType map[consts.PluginType][]string `json:"types"` + PluginsByType map[PluginType][]string `json:"types"` Details []PluginDetails `json:"details,omitempty"` @@ -68,11 +70,11 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( } result := &ListPluginsResponse{ - PluginsByType: make(map[consts.PluginType][]string), + PluginsByType: make(map[PluginType][]string), } switch i.Type { - case consts.PluginTypeUnknown: - for _, pluginType := range consts.PluginTypes { + case PluginTypeUnknown: + for _, pluginType := range PluginTypes { pluginsRaw, ok := secret.Data[pluginType.String()] if !ok { continue @@ -113,7 +115,7 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( } switch i.Type { - case consts.PluginTypeUnknown: + case PluginTypeUnknown: result.Details = details default: // Filter for just the queried type. @@ -133,8 +135,8 @@ type GetPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type consts.PluginType `json:"type"` - Version string `json:"version"` + Type PluginType `json:"type"` + Version string `json:"version"` } // GetPluginResponse is the response from the GetPlugin call. @@ -186,7 +188,7 @@ type RegisterPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. 
- Type consts.PluginType `json:"type"` + Type PluginType `json:"type"` // Args is the list of args to spawn the process with. Args []string `json:"args,omitempty"` @@ -231,7 +233,7 @@ type DeregisterPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type consts.PluginType `json:"type"` + Type PluginType `json:"type"` // Version of the plugin. Optional. Version string `json:"version,omitempty"` @@ -368,11 +370,11 @@ func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInp } // catalogPathByType is a helper to construct the proper API path by plugin type -func catalogPathByType(pluginType consts.PluginType, name string) string { +func catalogPathByType(pluginType PluginType, name string) string { path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) // Backwards compat, if type is not provided then use old path - if pluginType == consts.PluginTypeUnknown { + if pluginType == PluginTypeUnknown { path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) } diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go index 4a4f91b0..9ddffe4e 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_policy.go +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index 7806a141..29bfed0f 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go index 2ac8a474..57320175 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_rekey.go +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go index fa86886c..295d989f 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_rotate.go +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go index c772ae0f..7a9c5621 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_seal.go +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -93,22 +96,23 @@ func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*Sea } type SealStatusResponse struct { - Type string `json:"type"` - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - T int `json:"t"` - N int `json:"n"` - Progress int `json:"progress"` - Nonce string `json:"nonce"` - Version string `json:"version"` - BuildDate string `json:"build_date"` - Migration bool `json:"migration"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - RecoverySeal bool `json:"recovery_seal"` - StorageType string `json:"storage_type,omitempty"` - HCPLinkStatus string `json:"hcp_link_status,omitempty"` - HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + BuildDate string `json:"build_date"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` + HCPLinkStatus string `json:"hcp_link_status,omitempty"` + HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` + Warnings []string `json:"warnings,omitempty"` } type UnsealOpts struct { diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go index 833f31a6..c55ed1e6 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/vendor/github.com/hashicorp/vault/sdk/LICENSE b/vendor/github.com/hashicorp/vault/sdk/LICENSE deleted file mode 100644 index f4f97ee5..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/LICENSE +++ /dev/null @@ -1,365 +0,0 @@ -Copyright (c) 2015 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. 
"Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. 
Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. 
You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go deleted file mode 100644 index 58ebc06f..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go +++ /dev/null @@ -1,1386 +0,0 @@ -package certutil - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net" - "net/url" - "strconv" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/sdk/helper/errutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/mitchellh/mapstructure" - "golang.org/x/crypto/cryptobyte" - cbasn1 "golang.org/x/crypto/cryptobyte/asn1" -) - -const rsaMinimumSecureKeySize = 2048 - -// Mapping of key types to default key lengths -var defaultAlgorithmKeyBits = map[string]int{ - "rsa": 2048, - "ec": 256, -} - -// Mapping of NIST P-Curve's key length to expected signature bits. -var expectedNISTPCurveHashBits = map[int]int{ - 224: 256, - 256: 256, - 384: 384, - 521: 512, -} - -// Mapping of constant names<->constant values for SignatureAlgorithm -var SignatureAlgorithmNames = map[string]x509.SignatureAlgorithm{ - "sha256withrsa": x509.SHA256WithRSA, - "sha384withrsa": x509.SHA384WithRSA, - "sha512withrsa": x509.SHA512WithRSA, - "ecdsawithsha256": x509.ECDSAWithSHA256, - "ecdsawithsha384": x509.ECDSAWithSHA384, - "ecdsawithsha512": x509.ECDSAWithSHA512, - "sha256withrsapss": x509.SHA256WithRSAPSS, - "sha384withrsapss": x509.SHA384WithRSAPSS, - "sha512withrsapss": x509.SHA512WithRSAPSS, - "pureed25519": x509.PureEd25519, - "ed25519": x509.PureEd25519, // Duplicated for clarity; most won't expect the "Pure" prefix. -} - -// Mapping of constant values<->constant names for SignatureAlgorithm -var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ - x509.SHA256WithRSA: "SHA256WithRSA", - x509.SHA384WithRSA: "SHA384WithRSA", - x509.SHA512WithRSA: "SHA512WithRSA", - x509.ECDSAWithSHA256: "ECDSAWithSHA256", - x509.ECDSAWithSHA384: "ECDSAWithSHA384", - x509.ECDSAWithSHA512: "ECDSAWithSHA512", - x509.SHA256WithRSAPSS: "SHA256WithRSAPSS", - x509.SHA384WithRSAPSS: "SHA384WithRSAPSS", - x509.SHA512WithRSAPSS: "SHA512WithRSAPSS", - x509.PureEd25519: "Ed25519", -} - -// OID for RFC 5280 Delta CRL Indicator CRL extension. -// -// > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } -var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27}) - -// GetHexFormatted returns the byte buffer formatted in hex with -// the specified separator between bytes. 
-func GetHexFormatted(buf []byte, sep string) string { - var ret bytes.Buffer - for _, cur := range buf { - if ret.Len() > 0 { - fmt.Fprintf(&ret, sep) - } - fmt.Fprintf(&ret, "%02x", cur) - } - return ret.String() -} - -// ParseHexFormatted returns the raw bytes from a formatted hex string -func ParseHexFormatted(in, sep string) []byte { - var ret bytes.Buffer - var err error - var inBits int64 - inBytes := strings.Split(in, sep) - for _, inByte := range inBytes { - if inBits, err = strconv.ParseInt(inByte, 16, 8); err != nil { - return nil - } - ret.WriteByte(byte(inBits)) - } - return ret.Bytes() -} - -// GetSubjKeyID returns the subject key ID. The computed ID is the SHA-1 hash of -// the marshaled public key according to -// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 (1) -func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { - if privateKey == nil { - return nil, errutil.InternalError{Err: "passed-in private key is nil"} - } - return getSubjectKeyID(privateKey.Public()) -} - -// Returns the explicit SKID when used for cross-signing, else computes a new -// SKID from the key itself. -func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) { - if len(data.Params.SKID) > 0 { - return data.Params.SKID, nil - } - - return getSubjectKeyID(data.CSR.PublicKey) -} - -func getSubjectKeyID(pub interface{}) ([]byte, error) { - var publicKeyBytes []byte - switch pub := pub.(type) { - case *rsa.PublicKey: - type pkcs1PublicKey struct { - N *big.Int - E int - } - - var err error - publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{ - N: pub.N, - E: pub.E, - }) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} - } - case *ecdsa.PublicKey: - publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - case ed25519.PublicKey: - publicKeyBytes = pub - default: - return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported public key type: %T", pub)} - } - skid := sha1.Sum(publicKeyBytes) - return skid[:], nil -} - -// ParsePKIMap takes a map (for instance, the Secret.Data -// returned from the PKI backend) and returns a ParsedCertBundle. -func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) { - result := &CertBundle{} - err := mapstructure.Decode(data, result) - if err != nil { - return nil, errutil.UserError{Err: err.Error()} - } - - return result.ToParsedCertBundle() -} - -// ParsePKIJSON takes a JSON-encoded string and returns a ParsedCertBundle. -// -// This can be either the output of an -// issue call from the PKI backend or just its data member; or, -// JSON not coming from the PKI backend. 
-func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { - result := &CertBundle{} - err := jsonutil.DecodeJSON(input, &result) - - if err == nil { - return result.ToParsedCertBundle() - } - - var secret Secret - err = jsonutil.DecodeJSON(input, &secret) - - if err == nil { - return ParsePKIMap(secret.Data) - } - - return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"} -} - -func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType, err error) { - var firstError error - if signer, firstError = x509.ParseECPrivateKey(privateKeyBytes); firstError == nil { - format = ECBlock - return - } - - var secondError error - if signer, secondError = x509.ParsePKCS1PrivateKey(privateKeyBytes); secondError == nil { - format = PKCS1Block - return - } - - var thirdError error - var rawKey interface{} - if rawKey, thirdError = x509.ParsePKCS8PrivateKey(privateKeyBytes); thirdError == nil { - switch rawSigner := rawKey.(type) { - case *rsa.PrivateKey: - signer = rawSigner - case *ecdsa.PrivateKey: - signer = rawSigner - case ed25519.PrivateKey: - signer = rawSigner - default: - return nil, UnknownBlock, errutil.InternalError{Err: "unknown type for parsed PKCS8 Private Key"} - } - - format = PKCS8Block - return - } - - return nil, UnknownBlock, fmt.Errorf("got errors attempting to parse DER private key:\n1. %v\n2. %v\n3. %v", firstError, secondError, thirdError) -} - -func ParsePEMKey(keyPem string) (crypto.Signer, BlockType, error) { - pemBlock, _ := pem.Decode([]byte(keyPem)) - if pemBlock == nil { - return nil, UnknownBlock, errutil.UserError{Err: "no data found in PEM block"} - } - - return ParseDERKey(pemBlock.Bytes) -} - -// ParsePEMBundle takes a string of concatenated PEM-format certificate -// and private key values and decodes/parses them, checking validity along -// the way. The first certificate must be the subject certificate and issuing -// certificates may follow. There must be at most one private key. 
-func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { - if len(pemBundle) == 0 { - return nil, errutil.UserError{Err: "empty pem bundle"} - } - - pemBytes := []byte(pemBundle) - var pemBlock *pem.Block - parsedBundle := &ParsedCertBundle{} - var certPath []*CertBlock - - for len(pemBytes) > 0 { - pemBlock, pemBytes = pem.Decode(pemBytes) - if pemBlock == nil { - return nil, errutil.UserError{Err: "no data found in PEM block"} - } - - if signer, format, err := ParseDERKey(pemBlock.Bytes); err == nil { - if parsedBundle.PrivateKeyType != UnknownPrivateKey { - return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} - } - - parsedBundle.PrivateKeyFormat = format - parsedBundle.PrivateKeyType = GetPrivateKeyTypeFromSigner(signer) - if parsedBundle.PrivateKeyType == UnknownPrivateKey { - return nil, errutil.UserError{Err: "Unknown type of private key included in the bundle: %v"} - } - - parsedBundle.PrivateKeyBytes = pemBlock.Bytes - parsedBundle.PrivateKey = signer - } else if certificates, err := x509.ParseCertificates(pemBlock.Bytes); err == nil { - certPath = append(certPath, &CertBlock{ - Certificate: certificates[0], - Bytes: pemBlock.Bytes, - }) - } else if x509.IsEncryptedPEMBlock(pemBlock) { - return nil, errutil.UserError{Err: "Encrypted private key given; provide only decrypted private key in the bundle"} - } - } - - for i, certBlock := range certPath { - if i == 0 { - parsedBundle.Certificate = certBlock.Certificate - parsedBundle.CertificateBytes = certBlock.Bytes - } else { - parsedBundle.CAChain = append(parsedBundle.CAChain, certBlock) - } - } - - if err := parsedBundle.Verify(); err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("verification of parsed bundle failed: %s", err)} - } - - return parsedBundle, nil -} - -// GeneratePrivateKey generates a private key with the specified type and key bits. -func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { - return generatePrivateKey(keyType, keyBits, container, nil) -} - -// GeneratePrivateKeyWithRandomSource generates a private key with the specified type and key bits. -// GeneratePrivateKeyWithRandomSource uses randomness from the entropyReader to generate the private key. -func GeneratePrivateKeyWithRandomSource(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { - return generatePrivateKey(keyType, keyBits, container, entropyReader) -} - -// generatePrivateKey generates a private key with the specified type and key bits. -// generatePrivateKey uses randomness from the entropyReader to generate the private key. -func generatePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { - var err error - var privateKeyType PrivateKeyType - var privateKeyBytes []byte - var privateKey crypto.Signer - - var randReader io.Reader = rand.Reader - if entropyReader != nil { - randReader = entropyReader - } - - switch keyType { - case "rsa": - // XXX: there is a false-positive CodeQL path here around keyBits; - // because of a default zero value in the TypeDurationSecond and - // TypeSignedDurationSecond cases of schema.DefaultOrZero(), it - // thinks it is possible to end up with < 2048 bit RSA Key here. - // While this is true for SSH keys, it isn't true for PKI keys - // due to ValidateKeyTypeLength(...) below. 
While we could close - // the report as a false-positive, enforcing a minimum keyBits size - // here of 2048 would ensure no other paths exist. - if keyBits < 2048 { - return errutil.InternalError{Err: fmt.Sprintf("insecure bit length for RSA private key: %d", keyBits)} - } - privateKeyType = RSAPrivateKey - privateKey, err = rsa.GenerateKey(randReader, keyBits) - if err != nil { - return errutil.InternalError{Err: fmt.Sprintf("error generating RSA private key: %v", err)} - } - privateKeyBytes = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey)) - case "ec": - privateKeyType = ECPrivateKey - var curve elliptic.Curve - switch keyBits { - case 224: - curve = elliptic.P224() - case 256: - curve = elliptic.P256() - case 384: - curve = elliptic.P384() - case 521: - curve = elliptic.P521() - default: - return errutil.UserError{Err: fmt.Sprintf("unsupported bit length for EC key: %d", keyBits)} - } - privateKey, err = ecdsa.GenerateKey(curve, randReader) - if err != nil { - return errutil.InternalError{Err: fmt.Sprintf("error generating EC private key: %v", err)} - } - privateKeyBytes, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey)) - if err != nil { - return errutil.InternalError{Err: fmt.Sprintf("error marshalling EC private key: %v", err)} - } - case "ed25519": - privateKeyType = Ed25519PrivateKey - _, privateKey, err = ed25519.GenerateKey(randReader) - if err != nil { - return errutil.InternalError{Err: fmt.Sprintf("error generating ed25519 private key: %v", err)} - } - privateKeyBytes, err = x509.MarshalPKCS8PrivateKey(privateKey.(ed25519.PrivateKey)) - if err != nil { - return errutil.InternalError{Err: fmt.Sprintf("error marshalling Ed25519 private key: %v", err)} - } - default: - return errutil.UserError{Err: fmt.Sprintf("unknown key type: %s", keyType)} - } - - container.SetParsedPrivateKey(privateKey, privateKeyType, privateKeyBytes) - return nil -} - -// GenerateSerialNumber generates a serial number suitable for a certificate -func GenerateSerialNumber() (*big.Int, error) { - return generateSerialNumber(rand.Reader) -} - -// GenerateSerialNumberWithRandomSource generates a serial number suitable -// for a certificate with custom entropy. -func GenerateSerialNumberWithRandomSource(randReader io.Reader) (*big.Int, error) { - return generateSerialNumber(randReader) -} - -func generateSerialNumber(randReader io.Reader) (*big.Int, error) { - serial, err := rand.Int(randReader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error generating serial number: %v", err)} - } - return serial, nil -} - -// ComparePublicKeysAndType compares two public keys and returns true if they match, -// false if their types or contents differ, and an error on unsupported key types. -func ComparePublicKeysAndType(key1Iface, key2Iface crypto.PublicKey) (bool, error) { - equal, err := ComparePublicKeys(key1Iface, key2Iface) - if err != nil { - if strings.Contains(err.Error(), "key types do not match:") { - return false, nil - } - } - - return equal, err -} - -// ComparePublicKeys compares two public keys and returns true if they match, -// returns an error if public key types are mismatched, or they are an unsupported key type. 
-func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) { - switch key1Iface.(type) { - case *rsa.PublicKey: - key1 := key1Iface.(*rsa.PublicKey) - key2, ok := key2Iface.(*rsa.PublicKey) - if !ok { - return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) - } - if key1.N.Cmp(key2.N) != 0 || - key1.E != key2.E { - return false, nil - } - return true, nil - - case *ecdsa.PublicKey: - key1 := key1Iface.(*ecdsa.PublicKey) - key2, ok := key2Iface.(*ecdsa.PublicKey) - if !ok { - return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) - } - if key1.X.Cmp(key2.X) != 0 || - key1.Y.Cmp(key2.Y) != 0 { - return false, nil - } - key1Params := key1.Params() - key2Params := key2.Params() - if key1Params.P.Cmp(key2Params.P) != 0 || - key1Params.N.Cmp(key2Params.N) != 0 || - key1Params.B.Cmp(key2Params.B) != 0 || - key1Params.Gx.Cmp(key2Params.Gx) != 0 || - key1Params.Gy.Cmp(key2Params.Gy) != 0 || - key1Params.BitSize != key2Params.BitSize { - return false, nil - } - return true, nil - case ed25519.PublicKey: - key1 := key1Iface.(ed25519.PublicKey) - key2, ok := key2Iface.(ed25519.PublicKey) - if !ok { - return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) - } - if !key1.Equal(key2) { - return false, nil - } - return true, nil - default: - return false, fmt.Errorf("cannot compare key with type %T", key1Iface) - } -} - -// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs -func ParsePublicKeyPEM(data []byte) (interface{}, error) { - block, data := pem.Decode(data) - if block != nil { - if len(bytes.TrimSpace(data)) > 0 { - return nil, errutil.UserError{Err: "unexpected trailing data after parsed PEM block"} - } - var rawKey interface{} - var err error - if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - rawKey = cert.PublicKey - } else { - return nil, err - } - } - - switch key := rawKey.(type) { - case *rsa.PublicKey: - return key, nil - case *ecdsa.PublicKey: - return key, nil - case ed25519.PublicKey: - return key, nil - } - } - return nil, errors.New("data does not contain any valid public keys") -} - -// AddPolicyIdentifiers adds certificate policies extension, based on CreationBundle -func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) { - oidOnly := true - for _, oidStr := range data.Params.PolicyIdentifiers { - oid, err := StringToOid(oidStr) - if err == nil { - certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) - } - if err != nil { - oidOnly = false - } - } - if !oidOnly { // Because all policy information is held in the same extension, when we use an extra extension to - // add policy qualifier information, that overwrites any information in the PolicyIdentifiers field on the Cert - // Template, so we need to reparse all the policy identifiers here - extension, err := CreatePolicyInformationExtensionFromStorageStrings(data.Params.PolicyIdentifiers) - if err == nil { - // If this errors out, don't add it, rely on the OIDs parsed into PolicyIdentifiers above - certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, *extension) - } - } -} - -// AddExtKeyUsageOids adds custom extended key usage OIDs to certificate -func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) { - for _, oidstr := range data.Params.ExtKeyUsageOIDs { - oid, err := StringToOid(oidstr) - if err == nil { - 
certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid) - } - } -} - -func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { - certTemplate := &x509.Certificate{ - DNSNames: in.DNSNames, - IPAddresses: in.IPAddresses, - EmailAddresses: in.EmailAddresses, - URIs: in.URIs, - } - if err := HandleOtherSANs(certTemplate, sans); err != nil { - return err - } - if len(certTemplate.ExtraExtensions) > 0 { - for _, v := range certTemplate.ExtraExtensions { - in.ExtraExtensions = append(in.ExtraExtensions, v) - } - } - return nil -} - -func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error { - // If other SANs is empty we return which causes normal Go stdlib parsing - // of the other SAN types - if len(sans) == 0 { - return nil - } - - var rawValues []asn1.RawValue - - // We need to generate an IMPLICIT sequence for compatibility with OpenSSL - // -- it's an open question what the default for RFC 5280 actually is, see - // https://github.com/openssl/openssl/issues/5091 -- so we have to use - // cryptobyte because using the asn1 package's marshaling always produces - // an EXPLICIT sequence. Note that asn1 is way too magical according to - // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into - // boringssl. - for oid, vals := range sans { - for _, val := range vals { - var b cryptobyte.Builder - oidStr, err := StringToOid(oid) - if err != nil { - return err - } - b.AddASN1ObjectIdentifier(oidStr) - b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { - b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) { - b.AddBytes([]byte(val)) - }) - }) - m, err := b.Bytes() - if err != nil { - return err - } - rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) - } - } - - // If other SANs is empty we return which causes normal Go stdlib parsing - // of the other SAN types - if len(rawValues) == 0 { - return nil - } - - // Append any existing SANs, sans marshalling - rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) - - // Marshal and add to ExtraExtensions - ext := pkix.Extension{ - // This is the defined OID for subjectAltName - Id: asn1.ObjectIdentifier{2, 5, 29, 17}, - } - var err error - ext.Value, err = asn1.Marshal(rawValues) - if err != nil { - return err - } - in.ExtraExtensions = append(in.ExtraExtensions, ext) - - return nil -} - -// Note: Taken from the Go source code since it's not public, and used in the -// modified function below (which also uses these consts upstream) -const ( - nameTypeEmail = 1 - nameTypeDNS = 2 - nameTypeURI = 6 - nameTypeIP = 7 -) - -// Note: Taken from the Go source code since it's not public, plus changed to not marshal -// marshalSANs marshals a list of addresses into a the contents of an X.509 -// SubjectAlternativeName extension. -func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { - var rawValues []asn1.RawValue - for _, name := range dnsNames { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) - } - for _, email := range emailAddresses { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) - } - for _, rawIP := range ipAddresses { - // If possible, we always want to encode IPv4 addresses in 4 bytes. 
- ip := rawIP.To4() - if ip == nil { - ip = rawIP - } - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) - } - for _, uri := range uris { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) - } - return rawValues -} - -func StringToOid(in string) (asn1.ObjectIdentifier, error) { - split := strings.Split(in, ".") - ret := make(asn1.ObjectIdentifier, 0, len(split)) - for _, v := range split { - i, err := strconv.Atoi(v) - if err != nil { - return nil, err - } - ret = append(ret, i) - } - return asn1.ObjectIdentifier(ret), nil -} - -// Returns default key bits for the specified key type, or the present value -// if keyBits is non-zero. -func DefaultOrValueKeyBits(keyType string, keyBits int) (int, error) { - if keyBits == 0 { - newValue, present := defaultAlgorithmKeyBits[keyType] - if present { - keyBits = newValue - } /* else { - // We cannot return an error here as ed25519 (and potentially ed448 - // in the future) aren't in defaultAlgorithmKeyBits -- the value of - // the keyBits parameter is ignored under that algorithm. - } */ - } - - return keyBits, nil -} - -// Returns default signature hash bit length for the specified key type and -// bits, or the present value if hashBits is non-zero. Returns an error under -// certain internal circumstances. -func DefaultOrValueHashBits(keyType string, keyBits int, hashBits int) (int, error) { - if keyType == "ec" { - // Enforcement of curve moved to selectSignatureAlgorithmForECDSA. See - // note there about why. - } else if keyType == "rsa" && hashBits == 0 { - // To match previous behavior (and ignoring NIST's recommendations for - // hash size to align with RSA key sizes), default to SHA-2-256. - hashBits = 256 - } else if keyType == "ed25519" || keyType == "ed448" || keyType == "any" { - // No-op; ed25519 and ed448 internally specify their own hash and - // we do not need to select one. Double hashing isn't supported in - // certificate signing. Additionally, the any key type can't know - // what hash algorithm to use yet, so default to zero. - return 0, nil - } - - return hashBits, nil -} - -// Validates that the combination of keyType, keyBits, and hashBits are -// valid together; replaces individual calls to ValidateSignatureLength and -// ValidateKeyTypeLength. Also updates the value of keyBits and hashBits on -// return. -func ValidateDefaultOrValueKeyTypeSignatureLength(keyType string, keyBits int, hashBits int) (int, int, error) { - var err error - - if keyBits, err = DefaultOrValueKeyBits(keyType, keyBits); err != nil { - return keyBits, hashBits, err - } - - if err = ValidateKeyTypeLength(keyType, keyBits); err != nil { - return keyBits, hashBits, err - } - - if hashBits, err = DefaultOrValueHashBits(keyType, keyBits, hashBits); err != nil { - return keyBits, hashBits, err - } - - // Note that this check must come after we've selected a value for - // hashBits above, in the event it was left as the default, but we - // were allowed to update it. - if err = ValidateSignatureLength(keyType, hashBits); err != nil { - return keyBits, hashBits, err - } - - return keyBits, hashBits, nil -} - -// Validates that the length of the hash (in bits) used in the signature -// calculation is a known, approved value. -func ValidateSignatureLength(keyType string, hashBits int) error { - if keyType == "any" || keyType == "ec" || keyType == "ed25519" || keyType == "ed448" { - // ed25519 and ed448 include built-in hashing and is not externally - // configurable. 
There are three modes for each of these schemes: - // - // 1. Built-in hash (default, used in TLS, x509). - // 2. Double hash (notably used in some block-chain implementations, - // but largely regarded as a specialized use case with security - // concerns). - // 3. No hash (bring your own hash function, less commonly used). - // - // In all cases, we won't have a hash algorithm to validate here, so - // return nil. - // - // Additionally, when KeyType is any, we can't yet validate the - // signature algorithm size, so it takes the default zero value. - // - // When KeyType is ec, we also can't validate this value as we're - // forcefully ignoring the users' choice and specifying a value based - // on issuer type. - return nil - } - - switch hashBits { - case 256: - case 384: - case 512: - default: - return fmt.Errorf("unsupported hash signature algorithm: %d", hashBits) - } - - return nil -} - -func ValidateKeyTypeLength(keyType string, keyBits int) error { - switch keyType { - case "rsa": - if keyBits < rsaMinimumSecureKeySize { - return fmt.Errorf("RSA keys < %d bits are unsafe and not supported: got %d", rsaMinimumSecureKeySize, keyBits) - } - - switch keyBits { - case 2048: - case 3072: - case 4096: - case 8192: - default: - return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits) - } - case "ec": - _, present := expectedNISTPCurveHashBits[keyBits] - if !present { - return fmt.Errorf("unsupported bit length for EC key: %d", keyBits) - } - case "any", "ed25519": - default: - return fmt.Errorf("unknown key type %s", keyType) - } - - return nil -} - -// CreateCertificate uses CreationBundle and the default rand.Reader to -// generate a cert/keypair. -func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) { - return createCertificate(data, rand.Reader, generatePrivateKey) -} - -// CreateCertificateWithRandomSource uses CreationBundle and a custom -// io.Reader for randomness to generate a cert/keypair. -func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { - return createCertificate(data, randReader, generatePrivateKey) -} - -// KeyGenerator Allow us to override how/what generates the private key -type KeyGenerator func(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error - -func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCertBundle, error) { - return createCertificate(data, randReader, keyGenerator) -} - -// Set correct RSA sig algo -func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle) { - if data.Params.UsePSS { - switch data.Params.SignatureBits { - case 256: - certTemplate.SignatureAlgorithm = x509.SHA256WithRSAPSS - case 384: - certTemplate.SignatureAlgorithm = x509.SHA384WithRSAPSS - case 512: - certTemplate.SignatureAlgorithm = x509.SHA512WithRSAPSS - } - } else { - switch data.Params.SignatureBits { - case 256: - certTemplate.SignatureAlgorithm = x509.SHA256WithRSA - case 384: - certTemplate.SignatureAlgorithm = x509.SHA384WithRSA - case 512: - certTemplate.SignatureAlgorithm = x509.SHA512WithRSA - } - } -} - -// selectSignatureAlgorithmForRSA returns the proper x509.SignatureAlgorithm based on various properties set in the -// Creation Bundle parameter. This method will default to a SHA256 signature algorithm if the requested signature -// bits is not set/unknown. 
-func selectSignatureAlgorithmForRSA(data *CreationBundle) x509.SignatureAlgorithm { - if data.Params.UsePSS { - switch data.Params.SignatureBits { - case 256: - return x509.SHA256WithRSAPSS - case 384: - return x509.SHA384WithRSAPSS - case 512: - return x509.SHA512WithRSAPSS - default: - return x509.SHA256WithRSAPSS - } - } - - switch data.Params.SignatureBits { - case 256: - return x509.SHA256WithRSA - case 384: - return x509.SHA384WithRSA - case 512: - return x509.SHA512WithRSA - default: - return x509.SHA256WithRSA - } -} - -func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) { - var err error - result := &ParsedCertBundle{} - - serialNumber, err := GenerateSerialNumber() - if err != nil { - return nil, err - } - - if err := privateKeyGenerator(data.Params.KeyType, - data.Params.KeyBits, - result, randReader); err != nil { - return nil, err - } - - subjKeyID, err := GetSubjKeyID(result.PrivateKey) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)} - } - - certTemplate := &x509.Certificate{ - SerialNumber: serialNumber, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: data.Params.NotAfter, - IsCA: false, - SubjectKeyId: subjKeyID, - Subject: data.Params.Subject, - DNSNames: data.Params.DNSNames, - EmailAddresses: data.Params.EmailAddresses, - IPAddresses: data.Params.IPAddresses, - URIs: data.Params.URIs, - } - if data.Params.NotBeforeDuration > 0 { - certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) - } - - if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} - } - - // Add this before calling addKeyUsages - if data.SigningBundle == nil { - certTemplate.IsCA = true - } else if data.Params.BasicConstraintsValidForNonCA { - certTemplate.BasicConstraintsValid = true - certTemplate.IsCA = false - } - - // This will only be filled in from the generation paths - if len(data.Params.PermittedDNSDomains) > 0 { - certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains - certTemplate.PermittedDNSDomainsCritical = true - } - - AddPolicyIdentifiers(data, certTemplate) - - AddKeyUsages(data, certTemplate) - - AddExtKeyUsageOids(data, certTemplate) - - certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates - certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints - certTemplate.OCSPServer = data.Params.URLs.OCSPServers - - var certBytes []byte - if data.SigningBundle != nil { - privateKeyType := data.SigningBundle.PrivateKeyType - if privateKeyType == ManagedPrivateKey { - privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) - } - switch privateKeyType { - case RSAPrivateKey: - certTemplateSetSigAlgo(certTemplate, data) - case Ed25519PrivateKey: - certTemplate.SignatureAlgorithm = x509.PureEd25519 - case ECPrivateKey: - certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(data.SigningBundle.PrivateKey.Public(), data.Params.SignatureBits) - } - - caCert := data.SigningBundle.Certificate - certTemplate.AuthorityKeyId = caCert.SubjectKeyId - - certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey) - } else { - // Creating a self-signed root - if data.Params.MaxPathLength == 0 { - certTemplate.MaxPathLen = 0 - certTemplate.MaxPathLenZero = 
true - } else { - certTemplate.MaxPathLen = data.Params.MaxPathLength - } - - switch data.Params.KeyType { - case "rsa": - certTemplateSetSigAlgo(certTemplate, data) - case "ed25519": - certTemplate.SignatureAlgorithm = x509.PureEd25519 - case "ec": - certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits) - } - - certTemplate.AuthorityKeyId = subjKeyID - certTemplate.BasicConstraintsValid = true - certBytes, err = x509.CreateCertificate(randReader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) - } - - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} - } - - result.CertificateBytes = certBytes - result.Certificate, err = x509.ParseCertificate(certBytes) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} - } - - if data.SigningBundle != nil { - if (len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 && - !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId)) || - data.Params.ForceAppendCaChain { - var chain []*CertBlock - - signingChain := data.SigningBundle.CAChain - // Some bundles already include the root included in the chain, so don't include it twice. - if len(signingChain) == 0 || !bytes.Equal(signingChain[0].Bytes, data.SigningBundle.CertificateBytes) { - chain = append(chain, &CertBlock{ - Certificate: data.SigningBundle.Certificate, - Bytes: data.SigningBundle.CertificateBytes, - }) - } - - if len(signingChain) > 0 { - chain = append(chain, signingChain...) - } - - result.CAChain = chain - } - } - - return result, nil -} - -func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x509.SignatureAlgorithm { - // Previously we preferred the user-specified signature bits for ECDSA - // keys. However, this could result in using a longer hash function than - // the underlying NIST P-curve will encode (e.g., a SHA-512 hash with a - // P-256 key). This isn't ideal: the hash is implicitly truncated - // (effectively turning it into SHA-512/256) and we then need to rely - // on the prefix security of the hash. Since both NIST and Mozilla guidance - // suggest instead using the correct hash function, we should prefer that - // over the operator-specified signatureBits. - // - // Lastly, note that pub above needs to be the _signer's_ public key; - // the issue with DefaultOrValueHashBits is that it is called at role - // configuration time, which might _precede_ issuer generation. Thus - // it only has access to the desired key type and not the actual issuer. - // The reference from that function is reproduced below: - // - // > To comply with BSI recommendations Section 4.2 and Mozilla root - // > store policy section 5.1.2, enforce that NIST P-curves use a hash - // > length corresponding to curve length. Note that ed25519 does not - // > implement the "ec" key type. - key, ok := pub.(*ecdsa.PublicKey) - if !ok { - return x509.ECDSAWithSHA256 - } - switch key.Curve { - case elliptic.P224(), elliptic.P256(): - return x509.ECDSAWithSHA256 - case elliptic.P384(): - return x509.ECDSAWithSHA384 - case elliptic.P521(): - return x509.ECDSAWithSHA512 - default: - return x509.ECDSAWithSHA256 - } -} - -var ( - oidExtensionBasicConstraints = []int{2, 5, 29, 19} - oidExtensionSubjectAltName = []int{2, 5, 29, 17} -) - -// CreateCSR creates a CSR with the default rand.Reader to -// generate a cert/keypair. 
This is currently only meant -// for use when generating an intermediate certificate. -func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) { - return createCSR(data, addBasicConstraints, rand.Reader, generatePrivateKey) -} - -// CreateCSRWithRandomSource creates a CSR with a custom io.Reader -// for randomness to generate a cert/keypair. -func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) { - return createCSR(data, addBasicConstraints, randReader, generatePrivateKey) -} - -// CreateCSRWithKeyGenerator creates a CSR with a custom io.Reader -// for randomness to generate a cert/keypair with the provided private key generator. -func CreateCSRWithKeyGenerator(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) { - return createCSR(data, addBasicConstraints, randReader, keyGenerator) -} - -func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) { - var err error - result := &ParsedCSRBundle{} - - if err := keyGenerator(data.Params.KeyType, - data.Params.KeyBits, - result, randReader); err != nil { - return nil, err - } - - // Like many root CAs, other information is ignored - csrTemplate := &x509.CertificateRequest{ - Subject: data.Params.Subject, - DNSNames: data.Params.DNSNames, - EmailAddresses: data.Params.EmailAddresses, - IPAddresses: data.Params.IPAddresses, - URIs: data.Params.URIs, - } - - if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} - } - - if addBasicConstraints { - type basicConstraints struct { - IsCA bool `asn1:"optional"` - MaxPathLen int `asn1:"optional,default:-1"` - } - val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1}) - if err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} - } - ext := pkix.Extension{ - Id: oidExtensionBasicConstraints, - Value: val, - Critical: true, - } - csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext) - } - - switch data.Params.KeyType { - case "rsa": - // use specified RSA algorithm defaulting to the appropriate SHA256 RSA signature type - csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForRSA(data) - case "ec": - csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits) - case "ed25519": - csrTemplate.SignatureAlgorithm = x509.PureEd25519 - } - - csr, err := x509.CreateCertificateRequest(randReader, csrTemplate, result.PrivateKey) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} - } - - result.CSRBytes = csr - result.CSR, err = x509.ParseCertificateRequest(csr) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)} - } - - if err = result.CSR.CheckSignature(); err != nil { - return nil, errors.New("failed signature validation for CSR") - } - - return result, nil -} - -// SignCertificate performs the heavy lifting -// of generating a certificate from a CSR. -// Returns a ParsedCertBundle sans private keys. 
-func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) { - return signCertificate(data, rand.Reader) -} - -// SignCertificateWithRandomSource generates a certificate -// from a CSR, using custom randomness from the randReader. -// Returns a ParsedCertBundle sans private keys. -func SignCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { - return signCertificate(data, randReader) -} - -func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { - switch { - case data == nil: - return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"} - case data.Params == nil: - return nil, errutil.UserError{Err: "nil parameters given to signCertificate"} - case data.SigningBundle == nil: - return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"} - case data.CSR == nil: - return nil, errutil.UserError{Err: "nil csr given to signCertificate"} - } - - err := data.CSR.CheckSignature() - if err != nil { - return nil, errutil.UserError{Err: "request signature invalid"} - } - - result := &ParsedCertBundle{} - - serialNumber, err := GenerateSerialNumber() - if err != nil { - return nil, err - } - - subjKeyID, err := getSubjectKeyIDFromBundle(data) - if err != nil { - return nil, err - } - - caCert := data.SigningBundle.Certificate - - certTemplate := &x509.Certificate{ - SerialNumber: serialNumber, - Subject: data.Params.Subject, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: data.Params.NotAfter, - SubjectKeyId: subjKeyID[:], - AuthorityKeyId: caCert.SubjectKeyId, - } - if data.Params.NotBeforeDuration > 0 { - certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) - } - - privateKeyType := data.SigningBundle.PrivateKeyType - if privateKeyType == ManagedPrivateKey { - privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) - } - - switch privateKeyType { - case RSAPrivateKey: - certTemplateSetSigAlgo(certTemplate, data) - case ECPrivateKey: - switch data.Params.SignatureBits { - case 256: - certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 - case 384: - certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384 - case 512: - certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512 - } - } - - if data.Params.UseCSRValues { - certTemplate.Subject = data.CSR.Subject - certTemplate.Subject.ExtraNames = certTemplate.Subject.Names - - certTemplate.DNSNames = data.CSR.DNSNames - certTemplate.EmailAddresses = data.CSR.EmailAddresses - certTemplate.IPAddresses = data.CSR.IPAddresses - certTemplate.URIs = data.CSR.URIs - - for _, name := range data.CSR.Extensions { - if !name.Id.Equal(oidExtensionBasicConstraints) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(oidExtensionSubjectAltName)) { - certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) - } - } - - } else { - certTemplate.DNSNames = data.Params.DNSNames - certTemplate.EmailAddresses = data.Params.EmailAddresses - certTemplate.IPAddresses = data.Params.IPAddresses - certTemplate.URIs = data.Params.URIs - } - - if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { - return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} - } - - AddPolicyIdentifiers(data, certTemplate) - - AddKeyUsages(data, certTemplate) - - AddExtKeyUsageOids(data, certTemplate) - - var certBytes []byte - - certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates - 
certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints - certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers - - if data.Params.IsCA { - certTemplate.BasicConstraintsValid = true - certTemplate.IsCA = true - - if data.SigningBundle.Certificate.MaxPathLen == 0 && - data.SigningBundle.Certificate.MaxPathLenZero { - return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"} - } - - certTemplate.MaxPathLen = data.Params.MaxPathLength - if certTemplate.MaxPathLen == 0 { - certTemplate.MaxPathLenZero = true - } - } else if data.Params.BasicConstraintsValidForNonCA { - certTemplate.BasicConstraintsValid = true - certTemplate.IsCA = false - } - - if len(data.Params.PermittedDNSDomains) > 0 { - certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains - certTemplate.PermittedDNSDomainsCritical = true - } - - certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey) - - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} - } - - result.CertificateBytes = certBytes - result.Certificate, err = x509.ParseCertificate(certBytes) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} - } - - result.CAChain = data.SigningBundle.GetFullChain() - - return result, nil -} - -func NewCertPool(reader io.Reader) (*x509.CertPool, error) { - pemBlock, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - certs, err := parseCertsPEM(pemBlock) - if err != nil { - return nil, fmt.Errorf("error reading certs: %s", err) - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} - -// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array -// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates -func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { - ok := false - certs := []*x509.Certificate{} - for len(pemCerts) > 0 { - var block *pem.Block - block, pemCerts = pem.Decode(pemCerts) - if block == nil { - break - } - // Only use PEM "CERTIFICATE" blocks without extra headers - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return certs, err - } - - certs = append(certs, cert) - ok = true - } - - if !ok { - return certs, errors.New("data does not contain any valid RSA or ECDSA certificates") - } - return certs, nil -} - -// GetPublicKeySize returns the key size in bits for a given arbitrary crypto.PublicKey -// Returns -1 for an unsupported key type. -func GetPublicKeySize(key crypto.PublicKey) int { - if key, ok := key.(*rsa.PublicKey); ok { - return key.Size() * 8 - } - if key, ok := key.(*ecdsa.PublicKey); ok { - return key.Params().BitSize - } - if key, ok := key.(ed25519.PublicKey); ok { - return len(key) * 8 - } - if key, ok := key.(dsa.PublicKey); ok { - return key.Y.BitLen() - } - - return -1 -} - -// CreateKeyBundle create a KeyBundle struct object which includes a generated key -// of keyType with keyBits leveraging the randomness from randReader. 
-func CreateKeyBundle(keyType string, keyBits int, randReader io.Reader) (KeyBundle, error) { - return CreateKeyBundleWithKeyGenerator(keyType, keyBits, randReader, generatePrivateKey) -} - -// CreateKeyBundleWithKeyGenerator create a KeyBundle struct object which includes -// a generated key of keyType with keyBits leveraging the randomness from randReader and -// delegates the actual key generation to keyGenerator -func CreateKeyBundleWithKeyGenerator(keyType string, keyBits int, randReader io.Reader, keyGenerator KeyGenerator) (KeyBundle, error) { - result := KeyBundle{} - if err := keyGenerator(keyType, keyBits, &result, randReader); err != nil { - return result, err - } - return result, nil -} - -// CreateDeltaCRLIndicatorExt allows creating correctly formed delta CRLs -// that point back to the last complete CRL that they're based on. -func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) { - bigNum := big.NewInt(completeCRLNumber) - bigNumValue, err := asn1.Marshal(bigNum) - if err != nil { - return pkix.Extension{}, fmt.Errorf("unable to marshal complete CRL number (%v): %v", completeCRLNumber, err) - } - return pkix.Extension{ - Id: DeltaCRLIndicatorOID, - // > When a conforming CRL issuer generates a delta CRL, the delta - // > CRL MUST include a critical delta CRL indicator extension. - Critical: true, - // This extension only includes the complete CRL number: - // - // > BaseCRLNumber ::= CRLNumber - // - // But, this needs to be encoded as a big number for encoding/asn1 - // to work properly. - Value: bigNumValue, - }, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go deleted file mode 100644 index 15b816f0..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go +++ /dev/null @@ -1,1015 +0,0 @@ -// Package certutil contains helper functions that are mostly used -// with the PKI backend but can be generally useful. Functionality -// includes helpers for converting a certificate/private key bundle -// between DER and PEM, printing certificate serial numbers, and more. -// -// Functionality specific to the PKI backend includes some types -// and helper methods to make requesting certificates from the -// backend easy. -package certutil - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "net/url" - "strings" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/sdk/helper/errutil" -) - -const ( - PrivateKeyTypeP521 = "p521" -) - -// This can be one of a few key types so the different params may or may not be filled -type ClusterKeyParams struct { - Type string `json:"type" structs:"type" mapstructure:"type"` - X *big.Int `json:"x" structs:"x" mapstructure:"x"` - Y *big.Int `json:"y" structs:"y" mapstructure:"y"` - D *big.Int `json:"d" structs:"d" mapstructure:"d"` -} - -// Secret is used to attempt to unmarshal a Vault secret -// JSON response, as a convenience -type Secret struct { - Data map[string]interface{} `json:"data"` -} - -// PrivateKeyType holds a string representation of the type of private key (ec -// or rsa) referenced in CertBundle and ParsedCertBundle. 
This uses colloquial -// names rather than official names, to eliminate confusion -type PrivateKeyType string - -// Well-known PrivateKeyTypes -const ( - UnknownPrivateKey PrivateKeyType = "" - RSAPrivateKey PrivateKeyType = "rsa" - ECPrivateKey PrivateKeyType = "ec" - Ed25519PrivateKey PrivateKeyType = "ed25519" - ManagedPrivateKey PrivateKeyType = "ManagedPrivateKey" -) - -// TLSUsage controls whether the intended usage of a *tls.Config -// returned from ParsedCertBundle.getTLSConfig is for server use, -// client use, or both, which affects which values are set -type TLSUsage int - -// Well-known TLSUsage types -const ( - TLSUnknown TLSUsage = 0 - TLSServer TLSUsage = 1 << iota - TLSClient -) - -// BlockType indicates the serialization format of the key -type BlockType string - -// Well-known formats -const ( - UnknownBlock BlockType = "" - PKCS1Block BlockType = "RSA PRIVATE KEY" - PKCS8Block BlockType = "PRIVATE KEY" - ECBlock BlockType = "EC PRIVATE KEY" -) - -// ParsedPrivateKeyContainer allows common key setting for certs and CSRs -type ParsedPrivateKeyContainer interface { - SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) -} - -// CertBlock contains the DER-encoded certificate and the PEM -// block's byte array -type CertBlock struct { - Certificate *x509.Certificate - Bytes []byte -} - -// CertBundle contains a key type, a PEM-encoded private key, -// a PEM-encoded certificate, and a string-encoded serial number, -// returned from a successful Issue request -type CertBundle struct { - PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` - Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` - IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"` - CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"` - PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` - SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"` -} - -// ParsedCertBundle contains a key type, a DER-encoded private key, -// and a DER-encoded certificate -type ParsedCertBundle struct { - PrivateKeyType PrivateKeyType - PrivateKeyFormat BlockType - PrivateKeyBytes []byte - PrivateKey crypto.Signer - CertificateBytes []byte - Certificate *x509.Certificate - CAChain []*CertBlock -} - -// CSRBundle contains a key type, a PEM-encoded private key, -// and a PEM-encoded CSR -type CSRBundle struct { - PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` - CSR string `json:"csr" structs:"csr" mapstructure:"csr"` - PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` -} - -// ParsedCSRBundle contains a key type, a DER-encoded private key, -// and a DER-encoded certificate request -type ParsedCSRBundle struct { - PrivateKeyType PrivateKeyType - PrivateKeyBytes []byte - PrivateKey crypto.Signer - CSRBytes []byte - CSR *x509.CertificateRequest -} - -type KeyBundle struct { - PrivateKeyType PrivateKeyType - PrivateKeyBytes []byte - PrivateKey crypto.Signer -} - -func GetPrivateKeyTypeFromSigner(signer crypto.Signer) PrivateKeyType { - // We look at the public key types to work-around limitations/typing of managed keys. 
- switch signer.Public().(type) { - case *rsa.PublicKey: - return RSAPrivateKey - case *ecdsa.PublicKey: - return ECPrivateKey - case ed25519.PublicKey: - return Ed25519PrivateKey - } - return UnknownPrivateKey -} - -// ToPEMBundle converts a string-based certificate bundle -// to a PEM-based string certificate bundle in trust path -// order, leaf certificate first -func (c *CertBundle) ToPEMBundle() string { - var result []string - - if len(c.PrivateKey) > 0 { - result = append(result, c.PrivateKey) - } - if len(c.Certificate) > 0 { - result = append(result, c.Certificate) - } - if len(c.CAChain) > 0 { - result = append(result, c.CAChain...) - } - - return strings.Join(result, "\n") -} - -// ToParsedCertBundle converts a string-based certificate bundle -// to a byte-based raw certificate bundle -func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) { - return c.ToParsedCertBundleWithExtractor(extractAndSetPrivateKey) -} - -// PrivateKeyExtractor extract out a private key from the passed in -// CertBundle and set the appropriate bits within the ParsedCertBundle. -type PrivateKeyExtractor func(c *CertBundle, parsedBundle *ParsedCertBundle) error - -func (c *CertBundle) ToParsedCertBundleWithExtractor(privateKeyExtractor PrivateKeyExtractor) (*ParsedCertBundle, error) { - var err error - var pemBlock *pem.Block - result := &ParsedCertBundle{} - - err = privateKeyExtractor(c, result) - if err != nil { - return nil, err - } - - if len(c.Certificate) > 0 { - pemBlock, _ = pem.Decode([]byte(c.Certificate)) - if pemBlock == nil { - return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} - } - result.CertificateBytes = pemBlock.Bytes - result.Certificate, err = x509.ParseCertificate(result.CertificateBytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)} - } - } - switch { - case len(c.CAChain) > 0: - for _, cert := range c.CAChain { - pemBlock, _ := pem.Decode([]byte(cert)) - if pemBlock == nil { - return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} - } - - parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)} - } - - certBlock := &CertBlock{ - Bytes: pemBlock.Bytes, - Certificate: parsedCert, - } - result.CAChain = append(result.CAChain, certBlock) - } - - // For backwards compatibility - case len(c.IssuingCA) > 0: - pemBlock, _ = pem.Decode([]byte(c.IssuingCA)) - if pemBlock == nil { - return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"} - } - - parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)} - } - - certBlock := &CertBlock{ - Bytes: pemBlock.Bytes, - Certificate: parsedCert, - } - result.CAChain = append(result.CAChain, certBlock) - } - - // Populate if it isn't there already - if len(c.SerialNumber) == 0 && len(c.Certificate) > 0 { - c.SerialNumber = GetHexFormatted(result.Certificate.SerialNumber.Bytes(), ":") - } - - return result, nil -} - -func extractAndSetPrivateKey(c *CertBundle, parsedBundle *ParsedCertBundle) error { - if len(c.PrivateKey) == 0 { - return nil - } - - pemBlock, _ := pem.Decode([]byte(c.PrivateKey)) - if pemBlock == nil { - return errutil.UserError{Err: "Error 
decoding private key from cert bundle"} - } - - parsedBundle.PrivateKeyBytes = pemBlock.Bytes - parsedBundle.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type)) - - switch parsedBundle.PrivateKeyFormat { - case ECBlock: - parsedBundle.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey - case PKCS1Block: - c.PrivateKeyType, parsedBundle.PrivateKeyType = RSAPrivateKey, RSAPrivateKey - case PKCS8Block: - t, err := getPKCS8Type(pemBlock.Bytes) - if err != nil { - return errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)} - } - parsedBundle.PrivateKeyType = t - switch t { - case ECPrivateKey: - c.PrivateKeyType = ECPrivateKey - case RSAPrivateKey: - c.PrivateKeyType = RSAPrivateKey - case Ed25519PrivateKey: - c.PrivateKeyType = Ed25519PrivateKey - case ManagedPrivateKey: - c.PrivateKeyType = ManagedPrivateKey - } - default: - return errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)} - } - - var err error - parsedBundle.PrivateKey, err = parsedBundle.getSigner() - if err != nil { - return errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} - } - return nil -} - -// ToCertBundle converts a byte-based raw DER certificate bundle -// to a PEM-based string certificate bundle -func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { - result := &CertBundle{} - block := pem.Block{ - Type: "CERTIFICATE", - } - - if p.Certificate != nil { - result.SerialNumber = strings.TrimSpace(GetHexFormatted(p.Certificate.SerialNumber.Bytes(), ":")) - } - - if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { - block.Bytes = p.CertificateBytes - result.Certificate = strings.TrimSpace(string(pem.EncodeToMemory(&block))) - } - - for _, caCert := range p.CAChain { - block.Bytes = caCert.Bytes - certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) - - result.CAChain = append(result.CAChain, certificate) - } - - if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { - block.Type = string(p.PrivateKeyFormat) - block.Bytes = p.PrivateKeyBytes - result.PrivateKeyType = p.PrivateKeyType - - // Handle bundle not parsed by us - if block.Type == "" { - switch p.PrivateKeyType { - case ECPrivateKey: - block.Type = string(ECBlock) - case RSAPrivateKey: - block.Type = string(PKCS1Block) - case Ed25519PrivateKey: - block.Type = string(PKCS8Block) - } - } - - result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) - } - - return result, nil -} - -// Verify checks if the parsed bundle is valid. It validates the public -// key of the certificate to the private key and checks the certificate trust -// chain for path issues. 
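For orientation, a minimal sketch of how these bundle helpers fit together, assuming the deleted file is the Vault SDK's certutil helper (imported below as github.com/hashicorp/vault/sdk/helper/certutil); the PEM parameters are placeholders that a caller would normally take from an issuance response.

package example

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

// parseAndVerify converts a string-based bundle into its parsed form and then
// checks that the private key matches the leaf certificate and that the
// chain's AKID/SKID links are consistent, per Verify above.
func parseAndVerify(leafPEM, keyPEM string, caChainPEM []string) (*certutil.ParsedCertBundle, error) {
	bundle := &certutil.CertBundle{
		Certificate: leafPEM,
		PrivateKey:  keyPEM,
		CAChain:     caChainPEM,
	}

	parsed, err := bundle.ToParsedCertBundle()
	if err != nil {
		return nil, err
	}
	if err := parsed.Verify(); err != nil {
		return nil, err
	}

	fmt.Println("detected key type:", parsed.PrivateKeyType)
	return parsed, nil
}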
-func (p *ParsedCertBundle) Verify() error { - // If private key exists, check if it matches the public key of cert - if p.PrivateKey != nil && p.Certificate != nil { - equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public()) - if err != nil { - return errwrap.Wrapf("could not compare public and private keys: {{err}}", err) - } - if !equal { - return fmt.Errorf("public key of certificate does not match private key") - } - } - - certPath := p.GetCertificatePath() - if len(certPath) > 1 { - for i, caCert := range certPath[1:] { - if !caCert.Certificate.IsCA { - return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1) - } - if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) { - return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q) (%X/%X)", - i+1, - certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName, - certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) - } - } - } - - return nil -} - -// GetCertificatePath returns a slice of certificates making up a path, pulled -// from the parsed cert bundle -func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock { - var certPath []*CertBlock - - certPath = append(certPath, &CertBlock{ - Certificate: p.Certificate, - Bytes: p.CertificateBytes, - }) - - if len(p.CAChain) > 0 { - // Root CA puts itself in the chain - if p.CAChain[0].Certificate.SerialNumber != p.Certificate.SerialNumber { - certPath = append(certPath, p.CAChain...) - } - } - - return certPath -} - -// GetSigner returns a crypto.Signer corresponding to the private key -// contained in this ParsedCertBundle. The Signer contains a Public() function -// for getting the corresponding public. 
The Signer can also be -// type-converted to private keys -func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) { - var signer crypto.Signer - var err error - - if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { - return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} - } - - switch p.PrivateKeyFormat { - case ECBlock: - signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} - } - - case PKCS1Block: - signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} - } - - case PKCS8Block: - if k, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes); err == nil { - switch k := k.(type) { - case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: - return k.(crypto.Signer), nil - default: - return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} - } - } - return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} - default: - return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"} - } - return signer, nil -} - -// SetParsedPrivateKey sets the private key parameters on the bundle -func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { - p.PrivateKey = privateKey - p.PrivateKeyType = privateKeyType - p.PrivateKeyBytes = privateKeyBytes -} - -func getPKCS8Type(bs []byte) (PrivateKeyType, error) { - k, err := x509.ParsePKCS8PrivateKey(bs) - if err != nil { - return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} - } - - switch k.(type) { - case *ecdsa.PrivateKey: - return ECPrivateKey, nil - case *rsa.PrivateKey: - return RSAPrivateKey, nil - case ed25519.PrivateKey: - return Ed25519PrivateKey, nil - default: - return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} - } -} - -// ToParsedCSRBundle converts a string-based CSR bundle -// to a byte-based raw CSR bundle -func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) { - result := &ParsedCSRBundle{} - var err error - var pemBlock *pem.Block - - if len(c.PrivateKey) > 0 { - pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) - if pemBlock == nil { - return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} - } - result.PrivateKeyBytes = pemBlock.Bytes - - switch BlockType(pemBlock.Type) { - case ECBlock: - result.PrivateKeyType = ECPrivateKey - case PKCS1Block: - result.PrivateKeyType = RSAPrivateKey - default: - // Try to figure it out and correct - if _, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { - result.PrivateKeyType = ECPrivateKey - c.PrivateKeyType = "ec" - } else if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { - result.PrivateKeyType = RSAPrivateKey - c.PrivateKeyType = "rsa" - } else if _, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil { - result.PrivateKeyType = Ed25519PrivateKey - c.PrivateKeyType = "ed25519" - } else { - return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)} - } - } - - result.PrivateKey, err = result.getSigner() - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} - } 
- } - - if len(c.CSR) > 0 { - pemBlock, _ = pem.Decode([]byte(c.CSR)) - if pemBlock == nil { - return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} - } - result.CSRBytes = pemBlock.Bytes - result.CSR, err = x509.ParseCertificateRequest(result.CSRBytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)} - } - } - - return result, nil -} - -// ToCSRBundle converts a byte-based raw DER certificate bundle -// to a PEM-based string certificate bundle -func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) { - result := &CSRBundle{} - block := pem.Block{ - Type: "CERTIFICATE REQUEST", - } - - if p.CSRBytes != nil && len(p.CSRBytes) > 0 { - block.Bytes = p.CSRBytes - result.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block))) - } - - if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { - block.Bytes = p.PrivateKeyBytes - switch p.PrivateKeyType { - case RSAPrivateKey: - result.PrivateKeyType = "rsa" - block.Type = "RSA PRIVATE KEY" - case ECPrivateKey: - result.PrivateKeyType = "ec" - block.Type = "EC PRIVATE KEY" - case Ed25519PrivateKey: - result.PrivateKeyType = "ed25519" - block.Type = "PRIVATE KEY" - case ManagedPrivateKey: - result.PrivateKeyType = ManagedPrivateKey - block.Type = "PRIVATE KEY" - default: - return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"} - } - result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) - } - - return result, nil -} - -// GetSigner returns a crypto.Signer corresponding to the private key -// contained in this ParsedCSRBundle. The Signer contains a Public() function -// for getting the corresponding public. The Signer can also be -// type-converted to private keys -func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) { - var signer crypto.Signer - var err error - - if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { - return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} - } - - switch p.PrivateKeyType { - case ECPrivateKey: - signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} - } - - case RSAPrivateKey: - signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} - } - - case Ed25519PrivateKey: - signerd, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes) - signer = signerd.(ed25519.PrivateKey) - if err != nil { - return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private Ed25519 key: %s", err)} - } - - default: - return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA, Ed25519 and EC are supported"} - } - return signer, nil -} - -// SetParsedPrivateKey sets the private key parameters on the bundle -func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { - p.PrivateKey = privateKey - p.PrivateKeyType = privateKeyType - p.PrivateKeyBytes = privateKeyBytes -} - -// getTLSConfig returns a TLS config generally suitable for client -// authentication. 
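In the same spirit, a small sketch of the CSR path: converting a PEM CSR plus private key into the parsed form and reading fields off the resulting x509.CertificateRequest. The csrPEM and keyPEM parameters are placeholders.

package example

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

// inspectCSR parses a PEM-encoded CSR and key and reports what was found;
// ToParsedCSRBundle detects the key type from the PEM block itself.
func inspectCSR(csrPEM, keyPEM string) error {
	csrBundle := &certutil.CSRBundle{
		CSR:        csrPEM,
		PrivateKey: keyPEM,
	}

	parsed, err := csrBundle.ToParsedCSRBundle()
	if err != nil {
		return err
	}

	fmt.Println("subject CN:", parsed.CSR.Subject.CommonName)
	fmt.Println("key type:", parsed.PrivateKeyType)
	return nil
}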
The returned TLS config can be modified slightly -// to be made suitable for a server requiring client authentication; -// specifically, you should set the value of ClientAuth in the returned -// config to match your needs. -func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) { - tlsCert := tls.Certificate{ - Certificate: [][]byte{}, - } - - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS12, - } - - if p.Certificate != nil { - tlsCert.Leaf = p.Certificate - } - - if p.PrivateKey != nil { - tlsCert.PrivateKey = p.PrivateKey - } - - if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { - tlsCert.Certificate = append(tlsCert.Certificate, p.CertificateBytes) - } - - if len(p.CAChain) > 0 { - for _, cert := range p.CAChain { - tlsCert.Certificate = append(tlsCert.Certificate, cert.Bytes) - } - - // Technically we only need one cert, but this doesn't duplicate code - certBundle, err := p.ToCertBundle() - if err != nil { - return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err) - } - - caPool := x509.NewCertPool() - ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0])) - if !ok { - return nil, fmt.Errorf("could not append CA certificate") - } - - if usage&TLSServer > 0 { - tlsConfig.ClientCAs = caPool - tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven - } - if usage&TLSClient > 0 { - tlsConfig.RootCAs = caPool - } - } - - if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 { - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return tlsConfig, nil -} - -// IssueData is a structure that is suitable for marshaling into a request; -// either via JSON, or into a map[string]interface{} via the structs package -type IssueData struct { - TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"` - CommonName string `json:"common_name" structs:"common_name" mapstructure:"common_name"` - OU string `json:"ou" structs:"ou" mapstructure:"ou"` - AltNames string `json:"alt_names" structs:"alt_names" mapstructure:"alt_names"` - IPSANs string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"` - CSR string `json:"csr" structs:"csr" mapstructure:"csr"` - OtherSANs string `json:"other_sans" structs:"other_sans" mapstructure:"other_sans"` -} - -type URLEntries struct { - IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` - CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` - OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` -} - -type NotAfterBehavior int - -const ( - ErrNotAfterBehavior NotAfterBehavior = iota - TruncateNotAfterBehavior - PermitNotAfterBehavior -) - -var notAfterBehaviorNames = map[NotAfterBehavior]string{ - ErrNotAfterBehavior: "err", - TruncateNotAfterBehavior: "truncate", - PermitNotAfterBehavior: "permit", -} - -func (n NotAfterBehavior) String() string { - if name, ok := notAfterBehaviorNames[n]; ok && len(name) > 0 { - return name - } - - return "unknown" -} - -type CAInfoBundle struct { - ParsedCertBundle - URLs *URLEntries - LeafNotAfterBehavior NotAfterBehavior - RevocationSigAlg x509.SignatureAlgorithm -} - -func (b *CAInfoBundle) GetCAChain() []*CertBlock { - chain := []*CertBlock{} - - // Include issuing CA in Chain, not including Root Authority - if (len(b.Certificate.AuthorityKeyId) > 0 && - !bytes.Equal(b.Certificate.AuthorityKeyId, 
b.Certificate.SubjectKeyId)) || - (len(b.Certificate.AuthorityKeyId) == 0 && - !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { - - chain = b.GetFullChain() - } - - return chain -} - -func (b *CAInfoBundle) GetFullChain() []*CertBlock { - var chain []*CertBlock - - // Some bundles already include the root included in the chain, - // so don't include it twice. - if len(b.CAChain) == 0 || !bytes.Equal(b.CAChain[0].Bytes, b.CertificateBytes) { - chain = append(chain, &CertBlock{ - Certificate: b.Certificate, - Bytes: b.CertificateBytes, - }) - } - - if len(b.CAChain) > 0 { - chain = append(chain, b.CAChain...) - } - - return chain -} - -type CertExtKeyUsage int - -const ( - AnyExtKeyUsage CertExtKeyUsage = 1 << iota - ServerAuthExtKeyUsage - ClientAuthExtKeyUsage - CodeSigningExtKeyUsage - EmailProtectionExtKeyUsage - IpsecEndSystemExtKeyUsage - IpsecTunnelExtKeyUsage - IpsecUserExtKeyUsage - TimeStampingExtKeyUsage - OcspSigningExtKeyUsage - MicrosoftServerGatedCryptoExtKeyUsage - NetscapeServerGatedCryptoExtKeyUsage - MicrosoftCommercialCodeSigningExtKeyUsage - MicrosoftKernelCodeSigningExtKeyUsage -) - -type CreationParameters struct { - Subject pkix.Name - DNSNames []string - EmailAddresses []string - IPAddresses []net.IP - URIs []*url.URL - OtherSANs map[string][]string - IsCA bool - KeyType string - KeyBits int - NotAfter time.Time - KeyUsage x509.KeyUsage - ExtKeyUsage CertExtKeyUsage - ExtKeyUsageOIDs []string - PolicyIdentifiers []string - BasicConstraintsValidForNonCA bool - SignatureBits int - UsePSS bool - ForceAppendCaChain bool - - // Only used when signing a CA cert - UseCSRValues bool - PermittedDNSDomains []string - - // URLs to encode into the certificate - URLs *URLEntries - - // The maximum path length to encode - MaxPathLength int - - // The duration the certificate will use NotBefore - NotBeforeDuration time.Duration - - // The explicit SKID to use; especially useful for cross-signing. 
- SKID []byte -} - -type CreationBundle struct { - Params *CreationParameters - SigningBundle *CAInfoBundle - CSR *x509.CertificateRequest -} - -// addKeyUsages adds appropriate key usages to the template given the creation -// information -func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) { - if data.Params.IsCA { - certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) - return - } - - certTemplate.KeyUsage = data.Params.KeyUsage - - if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) - } - - if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) - } - - if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) - } - - if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) - } - - if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) - } - - if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) - } - - if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) - } - - if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) - } - - if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) - } - - if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) - } - - if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto) - } - - if data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) - } - - if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 { - certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) - } -} - -// SetParsedPrivateKey sets the private key parameters on the bundle -func (p *KeyBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { - p.PrivateKey = privateKey - p.PrivateKeyType = privateKeyType - p.PrivateKeyBytes = privateKeyBytes -} - -func (p *KeyBundle) ToPrivateKeyPemString() (string, error) { - block := pem.Block{} - - if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { - block.Bytes = p.PrivateKeyBytes - switch p.PrivateKeyType { - case RSAPrivateKey: - block.Type = "RSA PRIVATE KEY" - case ECPrivateKey: - block.Type = "EC PRIVATE KEY" - default: - block.Type = "PRIVATE KEY" - } - privateKeyPemString := strings.TrimSpace(string(pem.EncodeToMemory(&block))) - return 
privateKeyPemString, nil - } - - return "", errutil.InternalError{Err: "No Private Key Bytes to Wrap"} -} - -// PolicyIdentifierWithQualifierEntry Structure for Internal Storage -type PolicyIdentifierWithQualifierEntry struct { - PolicyIdentifierOid string `json:"oid",mapstructure:"oid"` - CPS string `json:"cps,omitempty",mapstructure:"cps"` - Notice string `json:"notice,omitempty",mapstructure:"notice"` -} - -// GetPolicyIdentifierFromString parses out the internal structure of a Policy Identifier -func GetPolicyIdentifierFromString(policyIdentifier string) (*PolicyIdentifierWithQualifierEntry, error) { - if policyIdentifier == "" { - return nil, nil - } - entry := &PolicyIdentifierWithQualifierEntry{} - // Either a OID, or a JSON Entry: First check OID: - _, err := StringToOid(policyIdentifier) - if err == nil { - entry.PolicyIdentifierOid = policyIdentifier - return entry, nil - } - // Now Check If JSON Entry - jsonErr := json.Unmarshal([]byte(policyIdentifier), &entry) - if jsonErr != nil { // Neither, if we got here - return entry, errors.New(fmt.Sprintf("Policy Identifier %q is neither a valid OID: %s, Nor JSON Policy Identifier: %s", policyIdentifier, err.Error(), jsonErr.Error())) - } - return entry, nil -} - -// Policy Identifier with Qualifier Structure for ASN Marshalling: - -var policyInformationOid = asn1.ObjectIdentifier{2, 5, 29, 32} - -type policyInformation struct { - PolicyIdentifier asn1.ObjectIdentifier - Qualifiers []interface{} `asn1:"tag:optional,omitempty"` -} - -var cpsPolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} - -type cpsUrlPolicyQualifier struct { - PolicyQualifierID asn1.ObjectIdentifier - Qualifier string `asn1:"tag:optional,ia5"` -} - -var userNoticePolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2} - -type userNoticePolicyQualifier struct { - PolicyQualifierID asn1.ObjectIdentifier - Qualifier userNotice -} - -type userNotice struct { - ExplicitText string `asn1:"tag:optional,utf8"` -} - -func createPolicyIdentifierWithQualifier(entry PolicyIdentifierWithQualifierEntry) (*policyInformation, error) { - // Each Policy is Identified by a Unique ID, as designated here: - policyOid, err := StringToOid(entry.PolicyIdentifierOid) - if err != nil { - return nil, err - } - pi := policyInformation{ - PolicyIdentifier: policyOid, - } - if entry.CPS != "" { - qualifier := cpsUrlPolicyQualifier{ - PolicyQualifierID: cpsPolicyQualifierID, - Qualifier: entry.CPS, - } - pi.Qualifiers = append(pi.Qualifiers, qualifier) - } - if entry.Notice != "" { - qualifier := userNoticePolicyQualifier{ - PolicyQualifierID: userNoticePolicyQualifierID, - Qualifier: userNotice{ - ExplicitText: entry.Notice, - }, - } - pi.Qualifiers = append(pi.Qualifiers, qualifier) - } - return &pi, nil -} - -// CreatePolicyInformationExtensionFromStorageStrings parses the stored policyIdentifiers, which might be JSON Policy -// Identifier with Qualifier Entries or String OIDs, and returns an extension if everything parsed correctly, and an -// error if constructing -func CreatePolicyInformationExtensionFromStorageStrings(policyIdentifiers []string) (*pkix.Extension, error) { - var policyInformationList []policyInformation - for _, policyIdentifierStr := range policyIdentifiers { - policyIdentifierEntry, err := GetPolicyIdentifierFromString(policyIdentifierStr) - if err != nil { - return nil, err - } - if policyIdentifierEntry != nil { // Okay to skip empty entries if there is no error - policyInformationStruct, err := 
createPolicyIdentifierWithQualifier(*policyIdentifierEntry) - if err != nil { - return nil, err - } - policyInformationList = append(policyInformationList, *policyInformationStruct) - } - } - asn1Bytes, err := asn1.Marshal(policyInformationList) - if err != nil { - return nil, err - } - return &pkix.Extension{ - Id: policyInformationOid, - Critical: false, - Value: asn1Bytes, - }, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go deleted file mode 100644 index 924f82a2..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go +++ /dev/null @@ -1,222 +0,0 @@ -package compressutil - -import ( - "bytes" - "compress/gzip" - "compress/lzw" - "fmt" - "io" - - "github.com/golang/snappy" - "github.com/hashicorp/errwrap" - "github.com/pierrec/lz4" -) - -const ( - // A byte value used as a canary prefix for the compressed information - // which is used to distinguish if a JSON input is compressed or not. - // The value of this constant should not be a first character of any - // valid JSON string. - - CompressionTypeGzip = "gzip" - CompressionCanaryGzip byte = 'G' - - CompressionTypeLZW = "lzw" - CompressionCanaryLZW byte = 'L' - - CompressionTypeSnappy = "snappy" - CompressionCanarySnappy byte = 'S' - - CompressionTypeLZ4 = "lz4" - CompressionCanaryLZ4 byte = '4' -) - -// SnappyReadCloser embeds the snappy reader which implements the io.Reader -// interface. The decompress procedure in this utility expects an -// io.ReadCloser. This type implements the io.Closer interface to retain the -// generic way of decompression. -type CompressUtilReadCloser struct { - io.Reader -} - -// Close is a noop method implemented only to satisfy the io.Closer interface -func (c *CompressUtilReadCloser) Close() error { - return nil -} - -// CompressionConfig is used to select a compression type to be performed by -// Compress and Decompress utilities. -// Supported types are: -// * CompressionTypeLZW -// * CompressionTypeGzip -// * CompressionTypeSnappy -// * CompressionTypeLZ4 -// -// When using CompressionTypeGzip, the compression levels can also be chosen: -// * gzip.DefaultCompression -// * gzip.BestSpeed -// * gzip.BestCompression -type CompressionConfig struct { - // Type of the compression algorithm to be used - Type string - - // When using Gzip format, the compression level to employ - GzipCompressionLevel int -} - -// Compress places the canary byte in a buffer and uses the same buffer to fill -// in the compressed information of the given input. The configuration supports -// two type of compression: LZW and Gzip. When using Gzip compression format, -// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will -// be assumed. 
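A minimal round-trip sketch for the Compress/Decompress pair documented above, assuming the package is imported from github.com/hashicorp/vault/sdk/helper/compressutil as the vendor path suggests; the payload is caller-supplied.

package example

import (
	"bytes"
	"compress/gzip"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/compressutil"
)

// roundTrip compresses a payload with gzip and decompresses it again.
func roundTrip(payload []byte) error {
	compressed, err := compressutil.Compress(payload, &compressutil.CompressionConfig{
		Type:                 compressutil.CompressionTypeGzip,
		GzipCompressionLevel: gzip.BestSpeed,
	})
	if err != nil {
		return err
	}

	// The first byte is the canary ('G' for gzip) that Decompress keys off.
	fmt.Printf("canary byte: %c\n", compressed[0])

	plain, notCompressed, err := compressutil.Decompress(compressed)
	if err != nil {
		return err
	}
	if notCompressed || !bytes.Equal(plain, payload) {
		return fmt.Errorf("unexpected round-trip result")
	}
	return nil
}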
-func Compress(data []byte, config *CompressionConfig) ([]byte, error) { - var buf bytes.Buffer - var writer io.WriteCloser - var err error - - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - // Write the canary into the buffer and create writer to compress the - // input data based on the configured type - switch config.Type { - case CompressionTypeLZW: - buf.Write([]byte{CompressionCanaryLZW}) - writer = lzw.NewWriter(&buf, lzw.LSB, 8) - - case CompressionTypeGzip: - buf.Write([]byte{CompressionCanaryGzip}) - - switch { - case config.GzipCompressionLevel == gzip.BestCompression, - config.GzipCompressionLevel == gzip.BestSpeed, - config.GzipCompressionLevel == gzip.DefaultCompression: - // These are valid compression levels - default: - // If compression level is set to NoCompression or to - // any invalid value, fallback to Defaultcompression - config.GzipCompressionLevel = gzip.DefaultCompression - } - writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) - - case CompressionTypeSnappy: - buf.Write([]byte{CompressionCanarySnappy}) - writer = snappy.NewBufferedWriter(&buf) - - case CompressionTypeLZ4: - buf.Write([]byte{CompressionCanaryLZ4}) - writer = lz4.NewWriter(&buf) - - default: - return nil, fmt.Errorf("unsupported compression type") - } - - if err != nil { - return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) - } - - if writer == nil { - return nil, fmt.Errorf("failed to create a compression writer") - } - - // Compress the input and place it in the same buffer containing the - // canary byte. - if _, err = writer.Write(data); err != nil { - return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err) - } - - // Close the io.WriteCloser - if err = writer.Close(); err != nil { - return nil, err - } - - // Return the compressed bytes with canary byte at the start - return buf.Bytes(), nil -} - -// Decompress checks if the first byte in the input matches the canary byte. -// If the first byte is a canary byte, then the input past the canary byte -// will be decompressed using the method specified in the given configuration. -// If the first byte isn't a canary byte, then the utility returns a boolean -// value indicating that the input was not compressed. -func Decompress(data []byte) ([]byte, bool, error) { - bytes, _, notCompressed, err := DecompressWithCanary(data) - return bytes, notCompressed, err -} - -// DecompressWithCanary checks if the first byte in the input matches the canary byte. -// If the first byte is a canary byte, then the input past the canary byte -// will be decompressed using the method specified in the given configuration. The type of compression used is also -// returned. If the first byte isn't a canary byte, then the utility returns a boolean -// value indicating that the input was not compressed. -func DecompressWithCanary(data []byte) ([]byte, string, bool, error) { - var err error - var reader io.ReadCloser - var compressionType string - if data == nil || len(data) == 0 { - return nil, "", false, fmt.Errorf("'data' being decompressed is empty") - } - - canary := data[0] - cData := data[1:] - - switch canary { - // If the first byte matches the canary byte, remove the canary - // byte and try to decompress the data that is after the canary. 
- case CompressionCanaryGzip: - if len(data) < 2 { - return nil, "", false, fmt.Errorf("invalid 'data' after the canary") - } - reader, err = gzip.NewReader(bytes.NewReader(cData)) - compressionType = CompressionTypeGzip - - case CompressionCanaryLZW: - if len(data) < 2 { - return nil, "", false, fmt.Errorf("invalid 'data' after the canary") - } - reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8) - compressionType = CompressionTypeLZW - - case CompressionCanarySnappy: - if len(data) < 2 { - return nil, "", false, fmt.Errorf("invalid 'data' after the canary") - } - reader = &CompressUtilReadCloser{ - Reader: snappy.NewReader(bytes.NewReader(cData)), - } - compressionType = CompressionTypeSnappy - - case CompressionCanaryLZ4: - if len(data) < 2 { - return nil, "", false, fmt.Errorf("invalid 'data' after the canary") - } - reader = &CompressUtilReadCloser{ - Reader: lz4.NewReader(bytes.NewReader(cData)), - } - compressionType = CompressionTypeLZ4 - - default: - // If the first byte doesn't match the canary byte, it means - // that the content was not compressed at all. Indicate the - // caller that the input was not compressed. - return nil, "", true, nil - } - if err != nil { - return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) - } - if reader == nil { - return nil, "", false, fmt.Errorf("failed to create a compression reader") - } - - // Close the io.ReadCloser - defer reader.Close() - - // Read all the compressed data into a buffer - var buf bytes.Buffer - if _, err = io.Copy(&buf, reader); err != nil { - return nil, "", false, err - } - - return buf.Bytes(), compressionType, false, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go deleted file mode 100644 index 55be844e..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go +++ /dev/null @@ -1,12 +0,0 @@ -package consts - -// AgentPathCacheClear is the path that the agent will use as its cache-clear -// endpoint. -const AgentPathCacheClear = "/agent/v1/cache-clear" - -// AgentPathMetrics is the path the the agent will use to expose its internal -// metrics. -const AgentPathMetrics = "/agent/v1/metrics" - -// AgentPathQuit is the path that the agent will use to trigger stopping it. -const AgentPathQuit = "/agent/v1/quit" diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go deleted file mode 100644 index c431e2e5..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go +++ /dev/null @@ -1,37 +0,0 @@ -package consts - -const ( - // ExpirationRestoreWorkerCount specifies the number of workers to use while - // restoring leases into the expiration manager - ExpirationRestoreWorkerCount = 64 - - // NamespaceHeaderName is the header set to specify which namespace the - // request is indented for. - NamespaceHeaderName = "X-Vault-Namespace" - - // AuthHeaderName is the name of the header containing the token. - AuthHeaderName = "X-Vault-Token" - - // RequestHeaderName is the name of the header used by the Agent for - // SSRF protection. - RequestHeaderName = "X-Vault-Request" - - // PerformanceReplicationALPN is the negotiated protocol used for - // performance replication. - PerformanceReplicationALPN = "replication_v1" - - // DRReplicationALPN is the negotiated protocol used for dr replication. 
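As an aside on the header-name constants above, a hedged sketch of how a client typically stamps them onto an outgoing Vault request; the URL, token, and namespace values are placeholders.

package example

import (
	"net/http"

	"github.com/hashicorp/vault/sdk/helper/consts"
)

// newVaultRequest attaches the token and (optionally) namespace headers.
func newVaultRequest(url, token, namespace string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set(consts.AuthHeaderName, token)
	if namespace != "" {
		req.Header.Set(consts.NamespaceHeaderName, namespace)
	}
	return req, nil
}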
- DRReplicationALPN = "replication_dr_v1" - - PerfStandbyALPN = "perf_standby_v1" - - RequestForwardingALPN = "req_fw_sb-act_v1" - - RaftStorageALPN = "raft_storage_v1" - - // ReplicationResolverALPN is the negotiated protocol used for - // resolving replicaiton addresses - ReplicationResolverALPN = "replication_resolver_v1" - - VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK" -) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go deleted file mode 100644 index 5591924a..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/deprecation_status.go +++ /dev/null @@ -1,31 +0,0 @@ -package consts - -const VaultAllowPendingRemovalMountsEnv = "VAULT_ALLOW_PENDING_REMOVAL_MOUNTS" - -// DeprecationStatus represents the current deprecation state for builtins -type DeprecationStatus uint32 - -// These are the states of deprecation for builtin plugins -const ( - Supported = iota - Deprecated - PendingRemoval - Removed - Unknown -) - -// String returns the string representation of a builtin deprecation status -func (s DeprecationStatus) String() string { - switch s { - case Supported: - return "supported" - case Deprecated: - return "deprecated" - case PendingRemoval: - return "pending removal" - case Removed: - return "removed" - default: - return "" - } -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go deleted file mode 100644 index 1a9175c6..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go +++ /dev/null @@ -1,25 +0,0 @@ -package consts - -import "errors" - -var ( - // ErrSealed is returned if an operation is performed on a sealed barrier. - // No operation is expected to succeed before unsealing - ErrSealed = errors.New("Vault is sealed") - - // ErrAPILocked is returned if an operation is performed when the API is - // locked for the request namespace. - ErrAPILocked = errors.New("API access to this namespace has been locked by an administrator") - - // ErrStandby is returned if an operation is performed on a standby Vault. - // No operation is expected to succeed until active. - ErrStandby = errors.New("Vault is in standby mode") - - // ErrPathContainsParentReferences is returned when a path contains parent - // references. - ErrPathContainsParentReferences = errors.New("path cannot contain parent references") - - // ErrInvalidWrappingToken is returned when checking for the validity of - // a wrapping token that turns out to be invalid. - ErrInvalidWrappingToken = errors.New("wrapping token is not valid or does not exist") -) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go deleted file mode 100644 index f72c2f47..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go +++ /dev/null @@ -1,159 +0,0 @@ -package consts - -const ( - // N.B. This needs to be excluded from replication despite the name; it's - // merely saying that this is cluster information for the replicated - // cluster. 
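For the sentinel errors defined a little earlier in this package, a short sketch of the kind of branching callers usually do; the returned action strings are illustrative only.

package example

import (
	"errors"

	"github.com/hashicorp/vault/sdk/helper/consts"
)

// classify maps well-known Vault error conditions to a suggested action.
func classify(err error) string {
	switch {
	case errors.Is(err, consts.ErrSealed):
		return "retry after unseal"
	case errors.Is(err, consts.ErrStandby):
		return "redirect to the active node"
	case errors.Is(err, consts.ErrAPILocked):
		return "namespace API is locked"
	default:
		return "unhandled"
	}
}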
- CoreReplicatedClusterPrefix = "core/cluster/replicated/" - CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" - - CoreReplicatedClusterInfoPath = CoreReplicatedClusterPrefix + "info" - CoreReplicatedClusterSecondariesPrefix = CoreReplicatedClusterPrefix + "secondaries/" - CoreReplicatedClusterInfoPathDR = CoreReplicatedClusterPrefixDR + "info" - CoreReplicatedClusterSecondariesPrefixDR = CoreReplicatedClusterPrefixDR + "secondaries/" - - // This is an identifier for the current secondary in the replicated paths - // manager. It should contain a character that is not allowed in secondary - // ids to ensure it doesn't collide. - CurrentReplicatedSecondaryIdentifier = ".current" - CoreFeatureFlagPath = "core/cluster/feature-flags" -) - -type ReplicationState uint32 - -const ( - _ ReplicationState = iota - OldReplicationPrimary - OldReplicationSecondary - OldReplicationBootstrapping - // Don't add anything here. Adding anything to this Old block would cause - // the rest of the values to change below. This was done originally to - // ensure no overlap between old and new values. - - ReplicationUnknown ReplicationState = 0 - ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here! - ReplicationPerformanceSecondary - OldSplitReplicationBootstrapping - ReplicationDRPrimary - ReplicationDRSecondary - ReplicationPerformanceBootstrapping - ReplicationDRBootstrapping - ReplicationPerformanceDisabled - ReplicationDRDisabled - ReplicationPerformanceStandby -) - -// We verify no change to the above values are made -func init() { - if OldReplicationBootstrapping != 3 { - panic("Replication Constants have changed") - } - - if ReplicationPerformancePrimary != 1<<5 { - panic("Replication Constants have changed") - } -} - -func (r ReplicationState) string() string { - switch r { - case ReplicationPerformanceSecondary: - return "secondary" - case ReplicationPerformancePrimary: - return "primary" - case ReplicationPerformanceBootstrapping: - return "bootstrapping" - case ReplicationPerformanceDisabled: - return "disabled" - case ReplicationDRPrimary: - return "primary" - case ReplicationDRSecondary: - return "secondary" - case ReplicationDRBootstrapping: - return "bootstrapping" - case ReplicationDRDisabled: - return "disabled" - } - - return "unknown" -} - -func (r ReplicationState) StateStrings() []string { - var ret []string - if r.HasState(ReplicationPerformanceSecondary) { - ret = append(ret, "perf-secondary") - } - if r.HasState(ReplicationPerformancePrimary) { - ret = append(ret, "perf-primary") - } - if r.HasState(ReplicationPerformanceBootstrapping) { - ret = append(ret, "perf-bootstrapping") - } - if r.HasState(ReplicationPerformanceDisabled) { - ret = append(ret, "perf-disabled") - } - if r.HasState(ReplicationDRPrimary) { - ret = append(ret, "dr-primary") - } - if r.HasState(ReplicationDRSecondary) { - ret = append(ret, "dr-secondary") - } - if r.HasState(ReplicationDRBootstrapping) { - ret = append(ret, "dr-bootstrapping") - } - if r.HasState(ReplicationDRDisabled) { - ret = append(ret, "dr-disabled") - } - if r.HasState(ReplicationPerformanceStandby) { - ret = append(ret, "perfstandby") - } - - return ret -} - -func (r ReplicationState) GetDRString() string { - switch { - case r.HasState(ReplicationDRBootstrapping): - return ReplicationDRBootstrapping.string() - case r.HasState(ReplicationDRPrimary): - return ReplicationDRPrimary.string() - case r.HasState(ReplicationDRSecondary): - return ReplicationDRSecondary.string() - case 
r.HasState(ReplicationDRDisabled): - return ReplicationDRDisabled.string() - default: - return "unknown" - } -} - -func (r ReplicationState) GetPerformanceString() string { - switch { - case r.HasState(ReplicationPerformanceBootstrapping): - return ReplicationPerformanceBootstrapping.string() - case r.HasState(ReplicationPerformancePrimary): - return ReplicationPerformancePrimary.string() - case r.HasState(ReplicationPerformanceSecondary): - return ReplicationPerformanceSecondary.string() - case r.HasState(ReplicationPerformanceDisabled): - return ReplicationPerformanceDisabled.string() - default: - return "unknown" - } -} - -func (r ReplicationState) IsPrimaryState() bool { - return r.HasState(ReplicationPerformancePrimary | ReplicationDRPrimary) -} - -func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 } -func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag } -func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag } -func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag } - -type HAState uint32 - -const ( - _ HAState = iota - Standby - PerfStandby - Active -) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go deleted file mode 100644 index 2b4e0278..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/consts/token_consts.go +++ /dev/null @@ -1,10 +0,0 @@ -package consts - -const ( - ServiceTokenPrefix = "hvs." - BatchTokenPrefix = "hvb." - RecoveryTokenPrefix = "hvr." - LegacyServiceTokenPrefix = "s." - LegacyBatchTokenPrefix = "b." - LegacyRecoveryTokenPrefix = "r." -) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go deleted file mode 100644 index a37086c6..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go +++ /dev/null @@ -1,11 +0,0 @@ -package cryptoutil - -import "golang.org/x/crypto/blake2b" - -func Blake2b256Hash(key string) []byte { - hf, _ := blake2b.New256(nil) - - hf.Write([]byte(key)) - - return hf.Sum(nil) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go deleted file mode 100644 index 0b95efb4..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go +++ /dev/null @@ -1,20 +0,0 @@ -package errutil - -// UserError represents an error generated due to invalid user input -type UserError struct { - Err string -} - -func (e UserError) Error() string { - return e.Err -} - -// InternalError represents an error generated internally, -// presumably not due to invalid user input -type InternalError struct { - Err string -} - -func (e InternalError) Error() string { - return e.Err -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go deleted file mode 100644 index 0b120367..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go +++ /dev/null @@ -1,36 +0,0 @@ -package hclutil - -import ( - "fmt" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/hcl/ast" -) - -// CheckHCLKeys checks whether the keys in the AST list contains any of the valid keys provided. 
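A minimal sketch of using CheckHCLKeys on a freshly parsed HCL document; it assumes hcl.Parse from github.com/hashicorp/hcl (the v1 parser that pairs with the hcl/hcl/ast import above), and the allowed-key list is illustrative.

package example

import (
	"github.com/hashicorp/hcl"
	"github.com/hashicorp/vault/sdk/helper/hclutil"
)

// checkTopLevelKeys flags any top-level HCL keys outside the allowed set.
func checkTopLevelKeys(src string) error {
	root, err := hcl.Parse(src)
	if err != nil {
		return err
	}
	// root.Node is an *ast.ObjectList for a parsed file, which CheckHCLKeys accepts.
	return hclutil.CheckHCLKeys(root.Node, []string{"listener", "storage", "telemetry"})
}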
-func CheckHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go deleted file mode 100644 index c03a4f8c..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go +++ /dev/null @@ -1,100 +0,0 @@ -package jsonutil - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/sdk/helper/compressutil" -) - -// Encodes/Marshals the given object into JSON -func EncodeJSON(in interface{}) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("input for encoding is nil") - } - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - if err := enc.Encode(in); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// EncodeJSONAndCompress encodes the given input into JSON and compresses the -// encoded value (using Gzip format BestCompression level, by default). A -// canary byte is placed at the beginning of the returned bytes for the logic -// in decompression method to identify compressed input. -func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("input for encoding is nil") - } - - // First JSON encode the given input - encodedBytes, err := EncodeJSON(in) - if err != nil { - return nil, err - } - - if config == nil { - config = &compressutil.CompressionConfig{ - Type: compressutil.CompressionTypeGzip, - GzipCompressionLevel: gzip.BestCompression, - } - } - - return compressutil.Compress(encodedBytes, config) -} - -// DecodeJSON tries to decompress the given data. The call to decompress, fails -// if the content was not compressed in the first place, which is identified by -// a canary byte before the compressed data. If the data is not compressed, it -// is JSON decoded directly. Otherwise the decompressed data will be JSON -// decoded. -func DecodeJSON(data []byte, out interface{}) error { - if data == nil || len(data) == 0 { - return fmt.Errorf("'data' being decoded is nil") - } - if out == nil { - return fmt.Errorf("output parameter 'out' is nil") - } - - // Decompress the data if it was compressed in the first place - decompressedBytes, uncompressed, err := compressutil.Decompress(data) - if err != nil { - return errwrap.Wrapf("failed to decompress JSON: {{err}}", err) - } - if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) { - return fmt.Errorf("decompressed data being decoded is invalid") - } - - // If the input supplied failed to contain the compression canary, it - // will be notified by the compression utility. Decode the decompressed - // input. 
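Putting the two helpers above together, a small sketch of storing and loading a value through the compressed-JSON path; the entry type is a stand-in.

package example

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/jsonutil"
)

type entry struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

// storeAndLoad compresses a JSON-encoded value (gzip at BestCompression when
// the config is nil) and decodes it again; DecodeJSON detects the canary itself.
func storeAndLoad() error {
	blob, err := jsonutil.EncodeJSONAndCompress(&entry{Name: "example", Count: 3}, nil)
	if err != nil {
		return err
	}

	var out entry
	if err := jsonutil.DecodeJSON(blob, &out); err != nil {
		return err
	}
	fmt.Printf("%+v\n", out)
	return nil
}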
- if !uncompressed { - data = decompressedBytes - } - - return DecodeJSONFromReader(bytes.NewReader(data), out) -} - -// Decodes/Unmarshals the given io.Reader pointing to a JSON, into a desired object -func DecodeJSONFromReader(r io.Reader, out interface{}) error { - if r == nil { - return fmt.Errorf("'io.Reader' being decoded is nil") - } - if out == nil { - return fmt.Errorf("output parameter 'out' is nil") - } - - dec := json.NewDecoder(r) - - // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`. - dec.UseNumber() - - // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&' - return dec.Decode(out) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go deleted file mode 100644 index c7c000a5..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go +++ /dev/null @@ -1,10 +0,0 @@ -package license - -// Features is a bitmask of feature flags -type Features uint - -const FeatureNone Features = 0 - -func (f Features) HasFeature(flag Features) bool { - return false -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go deleted file mode 100644 index 35ffcf73..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go +++ /dev/null @@ -1,58 +0,0 @@ -package locksutil - -import ( - "sync" - - "github.com/hashicorp/vault/sdk/helper/cryptoutil" -) - -const ( - LockCount = 256 -) - -type LockEntry struct { - sync.RWMutex -} - -// CreateLocks returns an array so that the locks can be iterated over in -// order. -// -// This is only threadsafe if a process is using a single lock, or iterating -// over the entire lock slice in order. 
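A brief sketch of the intended usage pattern for these striped locks, as the surrounding comment explains: create the slice once, then always take the lock for a key before touching that key's state.

package example

import (
	"github.com/hashicorp/vault/sdk/helper/locksutil"
)

// locks is created once; CreateLocks always returns 256 entries in a fixed order.
var locks = locksutil.CreateLocks()

// withKeyLock serializes work on one key while unrelated keys use other stripes.
func withKeyLock(key string, fn func()) {
	l := locksutil.LockForKey(locks, key)
	l.Lock()
	defer l.Unlock()
	fn()
}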
Using a consistent order avoids -// deadlocks because you can never have the following: -// -// Lock A, Lock B -// Lock B, Lock A -// -// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A -func CreateLocks() []*LockEntry { - ret := make([]*LockEntry, LockCount) - for i := range ret { - ret[i] = new(LockEntry) - } - return ret -} - -func LockIndexForKey(key string) uint8 { - return uint8(cryptoutil.Blake2b256Hash(key)[0]) -} - -func LockForKey(locks []*LockEntry, key string) *LockEntry { - return locks[LockIndexForKey(key)] -} - -func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry { - lockIndexes := make(map[uint8]struct{}, len(keys)) - for _, k := range keys { - lockIndexes[LockIndexForKey(k)] = struct{}{} - } - - locksToReturn := make([]*LockEntry, 0, len(keys)) - for i, l := range locks { - if _, ok := lockIndexes[uint8(i)]; ok { - locksToReturn = append(locksToReturn, l) - } - } - - return locksToReturn -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go deleted file mode 100644 index 211a545e..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go +++ /dev/null @@ -1,81 +0,0 @@ -package logging - -import ( - "fmt" - "io" - "os" - "strings" - - log "github.com/hashicorp/go-hclog" -) - -type LogFormat int - -const ( - UnspecifiedFormat LogFormat = iota - StandardFormat - JSONFormat -) - -// Stringer implementation -func (l LogFormat) String() string { - switch l { - case UnspecifiedFormat: - return "unspecified" - case StandardFormat: - return "standard" - case JSONFormat: - return "json" - } - - // unreachable - return "unknown" -} - -// NewVaultLogger creates a new logger with the specified level and a Vault -// formatter -func NewVaultLogger(level log.Level) log.Logger { - return NewVaultLoggerWithWriter(log.DefaultOutput, level) -} - -// NewVaultLoggerWithWriter creates a new logger with the specified level and -// writer and a Vault formatter -func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { - opts := &log.LoggerOptions{ - Level: level, - IndependentLevels: true, - Output: w, - JSONFormat: ParseEnvLogFormat() == JSONFormat, - } - return log.New(opts) -} - -// ParseLogFormat parses the log format from the provided string. -func ParseLogFormat(format string) (LogFormat, error) { - switch strings.ToLower(strings.TrimSpace(format)) { - case "": - return UnspecifiedFormat, nil - case "standard": - return StandardFormat, nil - case "json": - return JSONFormat, nil - default: - return UnspecifiedFormat, fmt.Errorf("Unknown log format: %s", format) - } -} - -// ParseEnvLogFormat parses the log format from an environment variable. 
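A small sketch of constructing a logger with these helpers; it reuses the hclog import alias from this file (log) and relies on VAULT_LOG_FORMAT to decide JSON versus standard output via ParseEnvLogFormat.

package example

import (
	"os"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/helper/logging"
)

// newDebugLogger writes to stderr at debug level; whether the output is JSON
// depends on VAULT_LOG_FORMAT (or LOGXI_FORMAT) at construction time.
func newDebugLogger() log.Logger {
	return logging.NewVaultLoggerWithWriter(os.Stderr, log.Debug)
}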
-func ParseEnvLogFormat() LogFormat { - logFormat := os.Getenv("VAULT_LOG_FORMAT") - if logFormat == "" { - logFormat = os.Getenv("LOGXI_FORMAT") - } - switch strings.ToLower(logFormat) { - case "json", "vault_json", "vault-json", "vaultjson": - return JSONFormat - case "standard": - return StandardFormat - default: - return UnspecifiedFormat - } -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go deleted file mode 100644 index e0e39445..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go +++ /dev/null @@ -1,136 +0,0 @@ -package pathmanager - -import ( - "strings" - "sync" - - iradix "github.com/hashicorp/go-immutable-radix" -) - -// PathManager is a prefix searchable index of paths -type PathManager struct { - l sync.RWMutex - paths *iradix.Tree -} - -// New creates a new path manager -func New() *PathManager { - return &PathManager{ - paths: iradix.New(), - } -} - -// AddPaths adds path to the paths list -func (p *PathManager) AddPaths(paths []string) { - p.l.Lock() - defer p.l.Unlock() - - txn := p.paths.Txn() - for _, prefix := range paths { - if len(prefix) == 0 { - continue - } - - var exception bool - if strings.HasPrefix(prefix, "!") { - prefix = strings.TrimPrefix(prefix, "!") - exception = true - } - - // We trim any trailing *, but we don't touch whether it is a trailing - // slash or not since we want to be able to ignore prefixes that fully - // specify a file - txn.Insert([]byte(strings.TrimSuffix(prefix, "*")), exception) - } - p.paths = txn.Commit() -} - -// RemovePaths removes paths from the paths list -func (p *PathManager) RemovePaths(paths []string) { - p.l.Lock() - defer p.l.Unlock() - - txn := p.paths.Txn() - for _, prefix := range paths { - if len(prefix) == 0 { - continue - } - - // Exceptions aren't stored with the leading ! 
so strip it - if strings.HasPrefix(prefix, "!") { - prefix = strings.TrimPrefix(prefix, "!") - } - - // We trim any trailing *, but we don't touch whether it is a trailing - // slash or not since we want to be able to ignore prefixes that fully - // specify a file - txn.Delete([]byte(strings.TrimSuffix(prefix, "*"))) - } - p.paths = txn.Commit() -} - -// RemovePathPrefix removes all paths with the given prefix -func (p *PathManager) RemovePathPrefix(prefix string) { - p.l.Lock() - defer p.l.Unlock() - - // We trim any trailing *, but we don't touch whether it is a trailing - // slash or not since we want to be able to ignore prefixes that fully - // specify a file - p.paths, _ = p.paths.DeletePrefix([]byte(strings.TrimSuffix(prefix, "*"))) -} - -// Len returns the number of paths -func (p *PathManager) Len() int { - return p.paths.Len() -} - -// Paths returns the path list -func (p *PathManager) Paths() []string { - p.l.RLock() - defer p.l.RUnlock() - - paths := make([]string, 0, p.paths.Len()) - walkFn := func(k []byte, v interface{}) bool { - paths = append(paths, string(k)) - return false - } - p.paths.Root().Walk(walkFn) - return paths -} - -// HasPath returns if the prefix for the path exists regardless if it is a path -// (ending with /) or a prefix for a leaf node -func (p *PathManager) HasPath(path string) bool { - p.l.RLock() - defer p.l.RUnlock() - - if _, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { - var exception bool - if exceptionRaw != nil { - exception = exceptionRaw.(bool) - } - return !exception - } - return false -} - -// HasExactPath returns if the longest match is an exact match for the -// full path -func (p *PathManager) HasExactPath(path string) bool { - p.l.RLock() - defer p.l.RUnlock() - - if val, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { - var exception bool - if exceptionRaw != nil { - exception = exceptionRaw.(bool) - } - - strVal := string(val) - if strings.HasSuffix(strVal, "/") || strVal == path { - return !exception - } - } - return false -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go deleted file mode 100644 index df1fdbee..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go +++ /dev/null @@ -1,77 +0,0 @@ -package pluginutil - -import ( - "os" - - "github.com/hashicorp/go-secure-stdlib/mlock" - version "github.com/hashicorp/go-version" -) - -const ( - // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override - // setting a TLSProviderFunc for a plugin. - PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" - - // PluginMlockEnabled is the ENV name used to pass the configuration for - // enabling mlock - PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" - - // PluginVaultVersionEnv is the ENV name used to pass the version of the - // vault server to the plugin - PluginVaultVersionEnv = "VAULT_VERSION" - - // PluginMetadataModeEnv is an ENV name used to disable TLS communication - // to bootstrap mounting plugins. - PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" - - // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the - // plugin. - PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" - - // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded - // string. Used for testing. 
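Returning to the PathManager helper above, a short sketch of prefix matching together with the "!" exception syntax; the paths themselves are illustrative.

package example

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/pathmanager"
)

func pathManagerDemo() {
	pm := pathmanager.New()
	pm.AddPaths([]string{"secret/", "!secret/internal/"})

	fmt.Println(pm.HasPath("secret/app/config"))     // true: longest prefix is "secret/"
	fmt.Println(pm.HasPath("secret/internal/token")) // false: the exception wins
	fmt.Println(pm.HasExactPath("secret/"))          // true: trailing-slash prefix match
}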
- PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" - - // PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names - // opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues - PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT" -) - -// OptionallyEnableMlock determines if mlock should be called, and if so enables -// mlock. -func OptionallyEnableMlock() error { - if os.Getenv(PluginMlockEnabled) == "true" { - return mlock.LockMemory() - } - - return nil -} - -// GRPCSupport defaults to returning true, unless VAULT_VERSION is missing or -// it fails to meet the version constraint. -func GRPCSupport() bool { - verString := os.Getenv(PluginVaultVersionEnv) - // If the env var is empty, we fall back to netrpc for backward compatibility. - if verString == "" { - return false - } - if verString != "unknown" { - ver, err := version.NewVersion(verString) - if err != nil { - return true - } - // Due to some regressions on 0.9.2 & 0.9.3 we now require version 0.9.4 - // to allow the plugin framework to default to gRPC. - constraint, err := version.NewConstraint(">= 0.9.4") - if err != nil { - return true - } - return constraint.Check(ver) - } - return true -} - -// InMetadataMode returns true if the plugin calling this function is running in metadata mode. -func InMetadataMode() bool { - return os.Getenv(PluginMetadataModeEnv) == "true" -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go deleted file mode 100644 index 41316ec4..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.go +++ /dev/null @@ -1,80 +0,0 @@ -package pluginutil - -import ( - "context" - "errors" - "fmt" - "os" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -var ErrNoMultiplexingIDFound = errors.New("no multiplexing ID found") - -type PluginMultiplexingServerImpl struct { - UnimplementedPluginMultiplexingServer - - Supported bool -} - -func (pm PluginMultiplexingServerImpl) MultiplexingSupport(_ context.Context, _ *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { - return &MultiplexingSupportResponse{ - Supported: pm.Supported, - }, nil -} - -func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface, name string) (bool, error) { - if cc == nil { - return false, fmt.Errorf("client connection is nil") - } - - out := strings.Split(os.Getenv(PluginMultiplexingOptOut), ",") - if strutil.StrListContains(out, name) { - return false, nil - } - - req := new(MultiplexingSupportRequest) - resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req) - if err != nil { - - // If the server does not implement the multiplexing server then we can - // assume it is not multiplexed - if status.Code(err) == codes.Unimplemented { - return false, nil - } - - return false, err - } - if resp == nil { - // Somehow got a nil response, assume not multiplexed - return false, nil - } - - return resp.Supported, nil -} - -func GetMultiplexIDFromContext(ctx context.Context) (string, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", fmt.Errorf("missing plugin multiplexing metadata") - } - - multiplexIDs := md[MultiplexingCtxKey] - if len(multiplexIDs) == 0 { - return "", ErrNoMultiplexingIDFound - } else if 
len(multiplexIDs) != 1 { - return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs)) - } - - multiplexID := multiplexIDs[0] - if multiplexID == "" { - return "", fmt.Errorf("empty multiplex ID in metadata") - } - - return multiplexID, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go deleted file mode 100644 index 96963af3..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.pb.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 -// source: sdk/helper/pluginutil/multiplexing.proto - -package pluginutil - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type MultiplexingSupportRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *MultiplexingSupportRequest) Reset() { - *x = MultiplexingSupportRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MultiplexingSupportRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MultiplexingSupportRequest) ProtoMessage() {} - -func (x *MultiplexingSupportRequest) ProtoReflect() protoreflect.Message { - mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MultiplexingSupportRequest.ProtoReflect.Descriptor instead. -func (*MultiplexingSupportRequest) Descriptor() ([]byte, []int) { - return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{0} -} - -type MultiplexingSupportResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Supported bool `protobuf:"varint,1,opt,name=supported,proto3" json:"supported,omitempty"` -} - -func (x *MultiplexingSupportResponse) Reset() { - *x = MultiplexingSupportResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MultiplexingSupportResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MultiplexingSupportResponse) ProtoMessage() {} - -func (x *MultiplexingSupportResponse) ProtoReflect() protoreflect.Message { - mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MultiplexingSupportResponse.ProtoReflect.Descriptor instead. 
-func (*MultiplexingSupportResponse) Descriptor() ([]byte, []int) { - return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{1} -} - -func (x *MultiplexingSupportResponse) GetSupported() bool { - if x != nil { - return x.Supported - } - return false -} - -var File_sdk_helper_pluginutil_multiplexing_proto protoreflect.FileDescriptor - -var file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, - 0x69, 0x6e, 0x67, 0x22, 0x1c, 0x0a, 0x1a, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, - 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x3b, 0x0a, 0x1b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, - 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x32, 0x97, - 0x01, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x78, 0x69, 0x6e, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x13, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, - 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x33, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, - 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, - 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, - 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce sync.Once - file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = file_sdk_helper_pluginutil_multiplexing_proto_rawDesc -) - -func file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP() []byte { - file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce.Do(func() { - file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_helper_pluginutil_multiplexing_proto_rawDescData) - }) - return file_sdk_helper_pluginutil_multiplexing_proto_rawDescData -} - -var file_sdk_helper_pluginutil_multiplexing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_sdk_helper_pluginutil_multiplexing_proto_goTypes = []interface{}{ - (*MultiplexingSupportRequest)(nil), // 0: pluginutil.multiplexing.MultiplexingSupportRequest - 
(*MultiplexingSupportResponse)(nil), // 1: pluginutil.multiplexing.MultiplexingSupportResponse -} -var file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = []int32{ - 0, // 0: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:input_type -> pluginutil.multiplexing.MultiplexingSupportRequest - 1, // 1: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:output_type -> pluginutil.multiplexing.MultiplexingSupportResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_sdk_helper_pluginutil_multiplexing_proto_init() } -func file_sdk_helper_pluginutil_multiplexing_proto_init() { - if File_sdk_helper_pluginutil_multiplexing_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MultiplexingSupportRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MultiplexingSupportResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sdk_helper_pluginutil_multiplexing_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_sdk_helper_pluginutil_multiplexing_proto_goTypes, - DependencyIndexes: file_sdk_helper_pluginutil_multiplexing_proto_depIdxs, - MessageInfos: file_sdk_helper_pluginutil_multiplexing_proto_msgTypes, - }.Build() - File_sdk_helper_pluginutil_multiplexing_proto = out.File - file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = nil - file_sdk_helper_pluginutil_multiplexing_proto_goTypes = nil - file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto deleted file mode 100644 index aa2438b0..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; -package pluginutil.multiplexing; - -option go_package = "github.com/hashicorp/vault/sdk/helper/pluginutil"; - -message MultiplexingSupportRequest {} -message MultiplexingSupportResponse { - bool supported = 1; -} - -service PluginMultiplexing { - rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go deleted file mode 100644 index aa8d0e47..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/multiplexing_grpc.pb.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
- -package pluginutil - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// PluginMultiplexingClient is the client API for PluginMultiplexing service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type PluginMultiplexingClient interface { - MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) -} - -type pluginMultiplexingClient struct { - cc grpc.ClientConnInterface -} - -func NewPluginMultiplexingClient(cc grpc.ClientConnInterface) PluginMultiplexingClient { - return &pluginMultiplexingClient{cc} -} - -func (c *pluginMultiplexingClient) MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) { - out := new(MultiplexingSupportResponse) - err := c.cc.Invoke(ctx, "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PluginMultiplexingServer is the server API for PluginMultiplexing service. -// All implementations must embed UnimplementedPluginMultiplexingServer -// for forward compatibility -type PluginMultiplexingServer interface { - MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) - mustEmbedUnimplementedPluginMultiplexingServer() -} - -// UnimplementedPluginMultiplexingServer must be embedded to have forward compatible implementations. -type UnimplementedPluginMultiplexingServer struct { -} - -func (UnimplementedPluginMultiplexingServer) MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MultiplexingSupport not implemented") -} -func (UnimplementedPluginMultiplexingServer) mustEmbedUnimplementedPluginMultiplexingServer() {} - -// UnsafePluginMultiplexingServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PluginMultiplexingServer will -// result in compilation errors. 
-type UnsafePluginMultiplexingServer interface { - mustEmbedUnimplementedPluginMultiplexingServer() -} - -func RegisterPluginMultiplexingServer(s grpc.ServiceRegistrar, srv PluginMultiplexingServer) { - s.RegisterService(&PluginMultiplexing_ServiceDesc, srv) -} - -func _PluginMultiplexing_MultiplexingSupport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MultiplexingSupportRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, req.(*MultiplexingSupportRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// PluginMultiplexing_ServiceDesc is the grpc.ServiceDesc for PluginMultiplexing service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var PluginMultiplexing_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "pluginutil.multiplexing.PluginMultiplexing", - HandlerType: (*PluginMultiplexingServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "MultiplexingSupport", - Handler: _PluginMultiplexing_MultiplexingSupport_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "sdk/helper/pluginutil/multiplexing.proto", -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go deleted file mode 100644 index 3eb8fb2b..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go +++ /dev/null @@ -1,179 +0,0 @@ -package pluginutil - -import ( - "context" - "crypto/sha256" - "crypto/tls" - "fmt" - "os/exec" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/version" -) - -type PluginClientConfig struct { - Name string - PluginType consts.PluginType - Version string - PluginSets map[int]plugin.PluginSet - HandshakeConfig plugin.HandshakeConfig - Logger log.Logger - IsMetadataMode bool - AutoMTLS bool - MLock bool - Wrapper RunnerUtil -} - -type runConfig struct { - // Provided by PluginRunner - command string - args []string - sha256 []byte - - // Initialized with what's in PluginRunner.Env, but can be added to - env []string - - PluginClientConfig -} - -func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { - cmd := exec.Command(rc.command, rc.args...) - cmd.Env = append(cmd.Env, rc.env...) 
- - // Add the mlock setting to the ENV of the plugin - if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) - } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) - - if rc.IsMetadataMode { - rc.Logger = rc.Logger.With("metadata", "true") - } - metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) - cmd.Env = append(cmd.Env, metadataEnv) - - automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS) - cmd.Env = append(cmd.Env, automtlsEnv) - - var clientTLSConfig *tls.Config - if !rc.AutoMTLS && !rc.IsMetadataMode { - // Get a CA TLS Certificate - certBytes, key, err := generateCert() - if err != nil { - return nil, err - } - - // Use CA to sign a client cert and return a configured TLS config - clientTLSConfig, err = createClientTLSConfig(certBytes, key) - if err != nil { - return nil, err - } - - // Use CA to sign a server cert and wrap the values in a response wrapped - // token. - wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key) - if err != nil { - return nil, err - } - - // Add the response wrap token to the ENV of the plugin - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) - } - - secureConfig := &plugin.SecureConfig{ - Checksum: rc.sha256, - Hash: sha256.New(), - } - - clientConfig := &plugin.ClientConfig{ - HandshakeConfig: rc.HandshakeConfig, - VersionedPlugins: rc.PluginSets, - Cmd: cmd, - SecureConfig: secureConfig, - TLSConfig: clientTLSConfig, - Logger: rc.Logger, - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolNetRPC, - plugin.ProtocolGRPC, - }, - AutoMTLS: rc.AutoMTLS, - } - return clientConfig, nil -} - -func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { - clientConfig, err := rc.makeConfig(ctx) - if err != nil { - return nil, err - } - - client := plugin.NewClient(clientConfig) - return client, nil -} - -type RunOpt func(*runConfig) - -func Env(env ...string) RunOpt { - return func(rc *runConfig) { - rc.env = append(rc.env, env...) 
- } -} - -func Runner(wrapper RunnerUtil) RunOpt { - return func(rc *runConfig) { - rc.Wrapper = wrapper - } -} - -func PluginSets(pluginSets map[int]plugin.PluginSet) RunOpt { - return func(rc *runConfig) { - rc.PluginSets = pluginSets - } -} - -func HandshakeConfig(hs plugin.HandshakeConfig) RunOpt { - return func(rc *runConfig) { - rc.HandshakeConfig = hs - } -} - -func Logger(logger log.Logger) RunOpt { - return func(rc *runConfig) { - rc.Logger = logger - } -} - -func MetadataMode(isMetadataMode bool) RunOpt { - return func(rc *runConfig) { - rc.IsMetadataMode = isMetadataMode - } -} - -func AutoMTLS(autoMTLS bool) RunOpt { - return func(rc *runConfig) { - rc.AutoMTLS = autoMTLS - } -} - -func MLock(mlock bool) RunOpt { - return func(rc *runConfig) { - rc.MLock = mlock - } -} - -func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) { - rc := runConfig{ - command: r.Command, - args: r.Args, - sha256: r.Sha256, - env: r.Env, - } - - for _, opt := range opts { - opt(&rc) - } - - return rc.run(ctx) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go deleted file mode 100644 index 631c4f3a..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go +++ /dev/null @@ -1,115 +0,0 @@ -package pluginutil - -import ( - "context" - "time" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/wrapping" - "google.golang.org/grpc" -) - -// Looker defines the plugin Lookup function that looks into the plugin catalog -// for available plugins and returns a PluginRunner -type Looker interface { - LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*PluginRunner, error) - LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*PluginRunner, error) -} - -// RunnerUtil interface defines the functions needed by the runner to wrap the -// metadata needed to run a plugin process. This includes looking up Mlock -// configuration and wrapping data in a response wrapped token. -// logical.SystemView implementations satisfy this interface. -type RunnerUtil interface { - NewPluginClient(ctx context.Context, config PluginClientConfig) (PluginClient, error) - ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) - MlockEnabled() bool -} - -// LookRunnerUtil defines the functions for both Looker and Wrapper -type LookRunnerUtil interface { - Looker - RunnerUtil -} - -type PluginClient interface { - Conn() grpc.ClientConnInterface - Reload() error - plugin.ClientProtocol -} - -const MultiplexingCtxKey string = "multiplex_id" - -// PluginRunner defines the metadata needed to run a plugin securely with -// go-plugin. 
-type PluginRunner struct { - Name string `json:"name" structs:"name"` - Type consts.PluginType `json:"type" structs:"type"` - Version string `json:"version" structs:"version"` - Command string `json:"command" structs:"command"` - Args []string `json:"args" structs:"args"` - Env []string `json:"env" structs:"env"` - Sha256 []byte `json:"sha256" structs:"sha256"` - Builtin bool `json:"builtin" structs:"builtin"` - BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` -} - -// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and -// returns a configured plugin.Client with TLS Configured and a wrapping token set -// on PluginUnwrapTokenEnv for plugin process consumption. -func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { - return r.RunConfig(ctx, - Runner(wrapper), - PluginSets(pluginSets), - HandshakeConfig(hs), - Env(env...), - Logger(logger), - MetadataMode(false), - ) -} - -// RunMetadataMode returns a configured plugin.Client that will dispense a plugin -// in metadata mode. The PluginMetadataModeEnv is passed in as part of the Cmd to -// plugin.Client, and consumed by the plugin process on api.VaultPluginTLSProvider. -func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { - return r.RunConfig(ctx, - Runner(wrapper), - PluginSets(pluginSets), - HandshakeConfig(hs), - Env(env...), - Logger(logger), - MetadataMode(true), - ) -} - -// VersionedPlugin holds any versioning information stored about a plugin in the -// plugin catalog. -type VersionedPlugin struct { - Type string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses. - Name string `json:"name"` - Version string `json:"version"` - SHA256 string `json:"sha256,omitempty"` - Builtin bool `json:"builtin"` - DeprecationStatus string `json:"deprecation_status,omitempty"` - - // Pre-parsed semver struct of the Version field - SemanticVersion *version.Version `json:"-"` -} - -// CtxCancelIfCanceled takes a context cancel func and a context. If the context is -// shutdown the cancelfunc is called. This is useful for merging two cancel -// functions. -func CtxCancelIfCanceled(f context.CancelFunc, ctxCanceler context.Context) chan struct{} { - quitCh := make(chan struct{}) - go func() { - select { - case <-quitCh: - case <-ctxCanceler.Done(): - f() - } - }() - return quitCh -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go deleted file mode 100644 index c5fff6d7..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go +++ /dev/null @@ -1,106 +0,0 @@ -package pluginutil - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "time" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/helper/certutil" -) - -// generateCert is used internally to create certificates for the plugin -// client and server. 
-func generateCert() ([]byte, *ecdsa.PrivateKey, error) { - key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return nil, nil, err - } - - host, err := uuid.GenerateUUID() - if err != nil { - return nil, nil, err - } - - sn, err := certutil.GenerateSerialNumber() - if err != nil { - return nil, nil, err - } - - template := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - }, - DNSNames: []string{host}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - x509.ExtKeyUsageServerAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, - SerialNumber: sn, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - IsCA: true, - } - - certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) - if err != nil { - return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) - } - - return certBytes, key, nil -} - -// createClientTLSConfig creates a signed certificate and returns a configured -// TLS config. -func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) { - clientCert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, errwrap.Wrapf("error parsing generated plugin certificate: {{err}}", err) - } - - cert := tls.Certificate{ - Certificate: [][]byte{certBytes}, - PrivateKey: key, - Leaf: clientCert, - } - - clientCertPool := x509.NewCertPool() - clientCertPool.AddCert(clientCert) - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: clientCertPool, - ClientCAs: clientCertPool, - ClientAuth: tls.RequireAndVerifyClientCert, - ServerName: clientCert.Subject.CommonName, - MinVersion: tls.VersionTLS12, - } - - return tlsConfig, nil -} - -// wrapServerConfig is used to create a server certificate and private key, then -// wrap them in an unwrap token for later retrieval by the plugin. 
-func wrapServerConfig(ctx context.Context, sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) { - rawKey, err := x509.MarshalECPrivateKey(key) - if err != nil { - return "", err - } - - wrapInfo, err := sys.ResponseWrapData(ctx, map[string]interface{}{ - "ServerCert": certBytes, - "ServerKey": rawKey, - }, time.Second*60, true) - if err != nil { - return "", err - } - - return wrapInfo.Token, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go deleted file mode 100644 index 09cc9425..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go +++ /dev/null @@ -1,94 +0,0 @@ -// DEPRECATED: this has been moved to go-secure-stdlib and will be removed -package strutil - -import ( - extstrutil "github.com/hashicorp/go-secure-stdlib/strutil" -) - -func StrListContainsGlob(haystack []string, needle string) bool { - return extstrutil.StrListContainsGlob(haystack, needle) -} - -func StrListContains(haystack []string, needle string) bool { - return extstrutil.StrListContains(haystack, needle) -} - -func StrListContainsCaseInsensitive(haystack []string, needle string) bool { - return extstrutil.StrListContainsCaseInsensitive(haystack, needle) -} - -func StrListSubset(super, sub []string) bool { - return extstrutil.StrListSubset(super, sub) -} - -func ParseDedupAndSortStrings(input string, sep string) []string { - return extstrutil.ParseDedupAndSortStrings(input, sep) -} - -func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { - return extstrutil.ParseDedupLowercaseAndSortStrings(input, sep) -} - -func ParseKeyValues(input string, out map[string]string, sep string) error { - return extstrutil.ParseKeyValues(input, out, sep) -} - -func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { - return extstrutil.ParseArbitraryKeyValues(input, out, sep) -} - -func ParseStringSlice(input string, sep string) []string { - return extstrutil.ParseStringSlice(input, sep) -} - -func ParseArbitraryStringSlice(input string, sep string) []string { - return extstrutil.ParseArbitraryStringSlice(input, sep) -} - -func TrimStrings(items []string) []string { - return extstrutil.TrimStrings(items) -} - -func RemoveDuplicates(items []string, lowercase bool) []string { - return extstrutil.RemoveDuplicates(items, lowercase) -} - -func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { - return extstrutil.RemoveDuplicatesStable(items, caseInsensitive) -} - -func RemoveEmpty(items []string) []string { - return extstrutil.RemoveEmpty(items) -} - -func EquivalentSlices(a, b []string) bool { - return extstrutil.EquivalentSlices(a, b) -} - -func EqualStringMaps(a, b map[string]string) bool { - return extstrutil.EqualStringMaps(a, b) -} - -func StrListDelete(s []string, d string) []string { - return extstrutil.StrListDelete(s, d) -} - -func GlobbedStringsMatch(item, val string) bool { - return extstrutil.GlobbedStringsMatch(item, val) -} - -func AppendIfMissing(slice []string, i string) []string { - return extstrutil.AppendIfMissing(slice, i) -} - -func MergeSlices(args ...[]string) []string { - return extstrutil.MergeSlices(args...) 
-} - -func Difference(a, b []string, lowercase bool) []string { - return extstrutil.Difference(a, b, lowercase) -} - -func GetString(m map[string]interface{}, key string) (string, error) { - return extstrutil.GetString(m, key) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go deleted file mode 100644 index 8d8e6334..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go +++ /dev/null @@ -1,37 +0,0 @@ -package wrapping - -import "time" - -type ResponseWrapInfo struct { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` - - // The token containing the wrapped response - Token string `json:"token" structs:"token" mapstructure:"token" sentinel:""` - - // The token accessor for the wrapped response token - Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` - - // The creation time. This can be used with the TTL to figure out an - // expected expiration. - CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time" sentinel:""` - - // If the contained response is the output of a token or approle secret-id creation call, the - // created token's/secret-id's accessor will be accessible here - WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor" sentinel:""` - - // WrappedEntityID is the entity identifier of the caller who initiated the - // wrapping request - WrappedEntityID string `json:"wrapped_entity_id" structs:"wrapped_entity_id" mapstructure:"wrapped_entity_id" sentinel:""` - - // The format to use. This doesn't get returned, it's only internal. - Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` - - // CreationPath is the original request path that was used to create - // the wrapped response. - CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path" sentinel:""` - - // Controls seal wrapping behavior downstream for specific use cases - SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/audit.go b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go deleted file mode 100644 index 8ba70f37..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/audit.go +++ /dev/null @@ -1,19 +0,0 @@ -package logical - -type LogInput struct { - Type string - Auth *Auth - Request *Request - Response *Response - OuterErr error - NonHMACReqDataKeys []string - NonHMACRespDataKeys []string -} - -type MarshalOptions struct { - ValueHasher func(string) string -} - -type OptMarshaler interface { - MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/auth.go b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go deleted file mode 100644 index 62707e81..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/auth.go +++ /dev/null @@ -1,129 +0,0 @@ -package logical - -import ( - "fmt" - "time" - - sockaddr "github.com/hashicorp/go-sockaddr" -) - -// Auth is the resulting authentication information that is part of -// Response for credential backends. It's also attached to Request objects and -// defines the authentication used for the request. This value is audit logged. 
-type Auth struct { - LeaseOptions - - // InternalData is JSON-encodable data that is stored with the auth struct. - // This will be sent back during a Renew/Revoke for storing internal data - // used for those operations. - InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"` - - // DisplayName is a non-security sensitive identifier that is - // applicable to this Auth. It is used for logging and prefixing - // of dynamic secrets. For example, DisplayName may be "armon" for - // the github credential backend. If the client token is used to - // generate a SQL credential, the user may be "github-armon-uuid". - // This is to help identify the source without using audit tables. - DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` - - // Policies is the list of policies that the authenticated user - // is associated with. - Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` - - // TokenPolicies and IdentityPolicies break down the list in Policies to - // help determine where a policy was sourced - TokenPolicies []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"` - IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"` - - // ExternalNamespacePolicies represent the policies authorized from - // different namespaces indexed by respective namespace identifiers - ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"` - - // Indicates that the default policy should not be added by core when - // creating a token. The default policy will still be added if it's - // explicitly defined. - NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"` - - // Metadata is used to attach arbitrary string-type metadata to - // an authenticated user. This metadata will be outputted into the - // audit log. - Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"` - - // ClientToken is the token that is generated for the authentication. - // This will be filled in by Vault core when an auth structure is - // returned. Setting this manually will have no effect. - ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"` - - // Accessor is the identifier for the ClientToken. This can be used - // to perform management functionalities (especially revocation) when - // ClientToken in the audit logs are obfuscated. Accessor can be used - // to revoke a ClientToken and to lookup the capabilities of the ClientToken, - // both without actually knowing the ClientToken. - Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"` - - // Period indicates that the token generated using this Auth object - // should never expire. The token should be renewed within the duration - // specified by this period. - Period time.Duration `json:"period" mapstructure:"period" structs:"period"` - - // ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal - // tokens, this value is constrained by the configured max ttl. 
- ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"` - - // Number of allowed uses of the issued token - NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` - - // EntityID is the identifier of the entity in identity store to which the - // identity of the authenticating client belongs to. - EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"` - - // Alias is the information about the authenticated client returned by - // the auth backend - Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"` - - // GroupAliases are the informational mappings of external groups which an - // authenticated user belongs to. This is used to check if there are - // mappings groups for the group aliases in identity store. For all the - // matching groups, the entity ID of the user will be added. - GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"` - - // The set of CIDRs that this token can be used with - BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"` - - // CreationPath is a path that the backend can return to use in the lease. - // This is currently only supported for the token store where roles may - // change the perceived path of the lease, even though they don't change - // the request path itself. - CreationPath string `json:"creation_path"` - - // TokenType is the type of token being requested - TokenType TokenType `json:"token_type"` - - // Orphan is set if the token does not have a parent - Orphan bool `json:"orphan"` - - // PolicyResults is the set of policies that grant the token access to the - // requesting path. - PolicyResults *PolicyResults `json:"policy_results"` - - // MFARequirement - MFARequirement *MFARequirement `json:"mfa_requirement"` - - // EntityCreated is set to true if an entity is created as part of a login request - EntityCreated bool `json:"entity_created"` -} - -func (a *Auth) GoString() string { - return fmt.Sprintf("*%#v", *a) -} - -type PolicyResults struct { - Allowed bool `json:"allowed"` - GrantingPolicies []PolicyInfo `json:"granting_policies"` -} - -type PolicyInfo struct { - Name string `json:"name"` - NamespaceId string `json:"namespace_id"` - Type string `json:"type"` -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/connection.go b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go deleted file mode 100644 index 5be86307..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/connection.go +++ /dev/null @@ -1,18 +0,0 @@ -package logical - -import ( - "crypto/tls" -) - -// Connection represents the connection information for a request. This -// is present on the Request structure for credential backends. -type Connection struct { - // RemoteAddr is the network address that sent the request. - RemoteAddr string `json:"remote_addr"` - - // RemotePort is the network port that sent the request. - RemotePort int `json:"remote_port"` - - // ConnState is the TLS connection state if applicable. 
- ConnState *tls.ConnectionState `sentinel:""` -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go deleted file mode 100644 index 2ed1b076..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go +++ /dev/null @@ -1,17 +0,0 @@ -package logical - -import ( - "time" -) - -type ControlGroup struct { - Authorizations []*Authz `json:"authorizations"` - RequestTime time.Time `json:"request_time"` - Approved bool `json:"approved"` - NamespaceID string `json:"namespace_id"` -} - -type Authz struct { - Token string `json:"token"` - AuthorizationTime time.Time `json:"authorization_time"` -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/error.go b/vendor/github.com/hashicorp/vault/sdk/logical/error.go deleted file mode 100644 index 68c8e137..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/error.go +++ /dev/null @@ -1,122 +0,0 @@ -package logical - -import "errors" - -var ( - // ErrUnsupportedOperation is returned if the operation is not supported - // by the logical backend. - ErrUnsupportedOperation = errors.New("unsupported operation") - - // ErrUnsupportedPath is returned if the path is not supported - // by the logical backend. - ErrUnsupportedPath = errors.New("unsupported path") - - // ErrInvalidRequest is returned if the request is invalid - ErrInvalidRequest = errors.New("invalid request") - - // ErrPermissionDenied is returned if the client is not authorized - ErrPermissionDenied = errors.New("permission denied") - - // ErrInvalidCredentials is returned when the provided credentials are incorrect - // This is used internally for user lockout purposes. This is not seen externally. - // The status code returned does not change because of this error - ErrInvalidCredentials = errors.New("invalid credentials") - - // ErrMultiAuthzPending is returned if the the request needs more - // authorizations - ErrMultiAuthzPending = errors.New("request needs further approval") - - // ErrUpstreamRateLimited is returned when Vault receives a rate limited - // response from an upstream - ErrUpstreamRateLimited = errors.New("upstream rate limited") - - // ErrPerfStandbyForward is returned when Vault is in a state such that a - // perf standby cannot satisfy a request - ErrPerfStandbyPleaseForward = errors.New("please forward to the active node") - - // ErrLeaseCountQuotaExceeded is returned when a request is rejected due to a lease - // count quota being exceeded. - ErrLeaseCountQuotaExceeded = errors.New("lease count quota exceeded") - - // ErrRateLimitQuotaExceeded is returned when a request is rejected due to a - // rate limit quota being exceeded. - ErrRateLimitQuotaExceeded = errors.New("rate limit quota exceeded") - - // ErrUnrecoverable is returned when a request fails due to something that - // is likely to require manual intervention. This is a generic form of an - // unrecoverable error. - // e.g.: misconfigured or disconnected storage backend. - ErrUnrecoverable = errors.New("unrecoverable error") - - // ErrMissingRequiredState is returned when a request can't be satisfied - // with the data in the local node's storage, based on the provided - // X-Vault-Index request header. 
- ErrMissingRequiredState = errors.New("required index state not present") - - // Error indicating that the requested path used to serve a purpose in older - // versions, but the functionality has now been removed - ErrPathFunctionalityRemoved = errors.New("functionality on this path has been removed") -) - -type HTTPCodedError interface { - Error() string - Code() int -} - -func CodedError(status int, msg string) HTTPCodedError { - return &codedError{ - Status: status, - Message: msg, - } -} - -var _ HTTPCodedError = (*codedError)(nil) - -type codedError struct { - Status int - Message string -} - -func (e *codedError) Error() string { - return e.Message -} - -func (e *codedError) Code() int { - return e.Status -} - -// Struct to identify user input errors. This is helpful in responding the -// appropriate status codes to clients from the HTTP endpoints. -type StatusBadRequest struct { - Err string -} - -// Implementing error interface -func (s *StatusBadRequest) Error() string { - return s.Err -} - -// This is a new type declared to not cause potential compatibility problems if -// the logic around the CodedError changes; in particular for logical request -// paths it is basically ignored, and changing that behavior might cause -// unforeseen issues. -type ReplicationCodedError struct { - Msg string - Code int -} - -func (r *ReplicationCodedError) Error() string { - return r.Msg -} - -type KeyNotFoundError struct { - Err error -} - -func (e *KeyNotFoundError) WrappedErrors() []error { - return []error{e.Err} -} - -func (e *KeyNotFoundError) Error() string { - return e.Err.Error() -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go deleted file mode 100644 index 18af6e68..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go +++ /dev/null @@ -1,709 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 -// source: sdk/logical/identity.proto - -package logical - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Entity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID is the unique identifier for the entity - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // Name is the human-friendly unique identifier for the entity - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Aliases contains thhe alias mappings for the given entity - Aliases []*Alias `protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"` - // Metadata represents the custom data tied to this entity - Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Disabled is true if the entity is disabled. - Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` - // NamespaceID is the identifier of the namespace to which this entity - // belongs to. 
- NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` -} - -func (x *Entity) Reset() { - *x = Entity{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_identity_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Entity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Entity) ProtoMessage() {} - -func (x *Entity) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_identity_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Entity.ProtoReflect.Descriptor instead. -func (*Entity) Descriptor() ([]byte, []int) { - return file_sdk_logical_identity_proto_rawDescGZIP(), []int{0} -} - -func (x *Entity) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *Entity) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Entity) GetAliases() []*Alias { - if x != nil { - return x.Aliases - } - return nil -} - -func (x *Entity) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Entity) GetDisabled() bool { - if x != nil { - return x.Disabled - } - return false -} - -func (x *Entity) GetNamespaceID() string { - if x != nil { - return x.NamespaceID - } - return "" -} - -type Alias struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // MountType is the backend mount's type to which this identity belongs - MountType string `protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` - // MountAccessor is the identifier of the mount entry to which this - // identity belongs - MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` - // Name is the identifier of this identity in its authentication source - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - // Metadata represents the custom data tied to this alias. Fields added - // to it should have a low rate of change (or no change) because each - // change incurs a storage write, so quickly-changing fields can have - // a significant performance impact at scale. See the SDK's - // "aliasmetadata" package for a helper that eases and standardizes - // using this safely. - Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // ID is the unique identifier for the alias - ID string `protobuf:"bytes,5,opt,name=ID,proto3" json:"ID,omitempty"` - // NamespaceID is the identifier of the namespace to which this alias - // belongs. - NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` - // Custom Metadata represents the custom data tied to this alias - CustomMetadata map[string]string `protobuf:"bytes,7,rep,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Local indicates if the alias only belongs to the cluster where it was - // created. 
If true, the alias will be stored in a location that are ignored - // by the performance replication subsystem. - Local bool `protobuf:"varint,8,opt,name=local,proto3" json:"local,omitempty"` -} - -func (x *Alias) Reset() { - *x = Alias{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_identity_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Alias) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Alias) ProtoMessage() {} - -func (x *Alias) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_identity_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Alias.ProtoReflect.Descriptor instead. -func (*Alias) Descriptor() ([]byte, []int) { - return file_sdk_logical_identity_proto_rawDescGZIP(), []int{1} -} - -func (x *Alias) GetMountType() string { - if x != nil { - return x.MountType - } - return "" -} - -func (x *Alias) GetMountAccessor() string { - if x != nil { - return x.MountAccessor - } - return "" -} - -func (x *Alias) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Alias) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Alias) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *Alias) GetNamespaceID() string { - if x != nil { - return x.NamespaceID - } - return "" -} - -func (x *Alias) GetCustomMetadata() map[string]string { - if x != nil { - return x.CustomMetadata - } - return nil -} - -func (x *Alias) GetLocal() bool { - if x != nil { - return x.Local - } - return false -} - -type Group struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // ID is the unique identifier for the group - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // Name is the human-friendly unique identifier for the group - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Metadata represents the custom data tied to this group - Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // NamespaceID is the identifier of the namespace to which this group - // belongs to. - NamespaceID string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` -} - -func (x *Group) Reset() { - *x = Group{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_identity_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Group) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Group) ProtoMessage() {} - -func (x *Group) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_identity_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Group.ProtoReflect.Descriptor instead. 
-func (*Group) Descriptor() ([]byte, []int) { - return file_sdk_logical_identity_proto_rawDescGZIP(), []int{2} -} - -func (x *Group) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *Group) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Group) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Group) GetNamespaceID() string { - if x != nil { - return x.NamespaceID - } - return "" -} - -type MFAMethodID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - UsesPasscode bool `protobuf:"varint,3,opt,name=uses_passcode,json=usesPasscode,proto3" json:"uses_passcode,omitempty"` -} - -func (x *MFAMethodID) Reset() { - *x = MFAMethodID{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_identity_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MFAMethodID) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MFAMethodID) ProtoMessage() {} - -func (x *MFAMethodID) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_identity_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MFAMethodID.ProtoReflect.Descriptor instead. -func (*MFAMethodID) Descriptor() ([]byte, []int) { - return file_sdk_logical_identity_proto_rawDescGZIP(), []int{3} -} - -func (x *MFAMethodID) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *MFAMethodID) GetID() string { - if x != nil { - return x.ID - } - return "" -} - -func (x *MFAMethodID) GetUsesPasscode() bool { - if x != nil { - return x.UsesPasscode - } - return false -} - -type MFAConstraintAny struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Any []*MFAMethodID `protobuf:"bytes,1,rep,name=any,proto3" json:"any,omitempty"` -} - -func (x *MFAConstraintAny) Reset() { - *x = MFAConstraintAny{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_identity_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MFAConstraintAny) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MFAConstraintAny) ProtoMessage() {} - -func (x *MFAConstraintAny) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_identity_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MFAConstraintAny.ProtoReflect.Descriptor instead. 
-func (*MFAConstraintAny) Descriptor() ([]byte, []int) { - return file_sdk_logical_identity_proto_rawDescGZIP(), []int{4} -} - -func (x *MFAConstraintAny) GetAny() []*MFAMethodID { - if x != nil { - return x.Any - } - return nil -} - -type MFARequirement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MFARequestID string `protobuf:"bytes,1,opt,name=mfa_request_id,json=mfaRequestId,proto3" json:"mfa_request_id,omitempty"` - MFAConstraints map[string]*MFAConstraintAny `protobuf:"bytes,2,rep,name=mfa_constraints,json=mfaConstraints,proto3" json:"mfa_constraints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *MFARequirement) Reset() { - *x = MFARequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_identity_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MFARequirement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MFARequirement) ProtoMessage() {} - -func (x *MFARequirement) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_identity_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MFARequirement.ProtoReflect.Descriptor instead. -func (*MFARequirement) Descriptor() ([]byte, []int) { - return file_sdk_logical_identity_proto_rawDescGZIP(), []int{5} -} - -func (x *MFARequirement) GetMFARequestID() string { - if x != nil { - return x.MFARequestID - } - return "" -} - -func (x *MFARequirement) GetMFAConstraints() map[string]*MFAConstraintAny { - if x != nil { - return x.MFAConstraints - } - return nil -} - -var File_sdk_logical_identity_proto protoreflect.FileDescriptor - -var file_sdk_logical_identity_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, - 0x67, 0x69, 0x63, 0x61, 0x6c, 0x22, 0x8d, 0x02, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x39, - 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 
0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x03, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, - 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, - 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x49, 0x44, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x69, - 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 
0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x56, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73, - 0x73, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65, - 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x4d, 0x46, 0x41, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x12, 0x26, 0x0a, - 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67, - 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, - 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x66, 0x61, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x54, - 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, - 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, - 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, - 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sdk_logical_identity_proto_rawDescOnce sync.Once - file_sdk_logical_identity_proto_rawDescData = file_sdk_logical_identity_proto_rawDesc -) - -func 
file_sdk_logical_identity_proto_rawDescGZIP() []byte { - file_sdk_logical_identity_proto_rawDescOnce.Do(func() { - file_sdk_logical_identity_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_identity_proto_rawDescData) - }) - return file_sdk_logical_identity_proto_rawDescData -} - -var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_sdk_logical_identity_proto_goTypes = []interface{}{ - (*Entity)(nil), // 0: logical.Entity - (*Alias)(nil), // 1: logical.Alias - (*Group)(nil), // 2: logical.Group - (*MFAMethodID)(nil), // 3: logical.MFAMethodID - (*MFAConstraintAny)(nil), // 4: logical.MFAConstraintAny - (*MFARequirement)(nil), // 5: logical.MFARequirement - nil, // 6: logical.Entity.MetadataEntry - nil, // 7: logical.Alias.MetadataEntry - nil, // 8: logical.Alias.CustomMetadataEntry - nil, // 9: logical.Group.MetadataEntry - nil, // 10: logical.MFARequirement.MFAConstraintsEntry -} -var file_sdk_logical_identity_proto_depIDxs = []int32{ - 1, // 0: logical.Entity.aliases:type_name -> logical.Alias - 6, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry - 7, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry - 8, // 3: logical.Alias.custom_metadata:type_name -> logical.Alias.CustomMetadataEntry - 9, // 4: logical.Group.metadata:type_name -> logical.Group.MetadataEntry - 3, // 5: logical.MFAConstraintAny.any:type_name -> logical.MFAMethodID - 10, // 6: logical.MFARequirement.mfa_constraints:type_name -> logical.MFARequirement.MFAConstraintsEntry - 4, // 7: logical.MFARequirement.MFAConstraintsEntry.value:type_name -> logical.MFAConstraintAny - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name -} - -func init() { file_sdk_logical_identity_proto_init() } -func file_sdk_logical_identity_proto_init() { - if File_sdk_logical_identity_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Entity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Alias); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Group); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_identity_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MFAMethodID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_identity_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MFAConstraintAny); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_identity_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MFARequirement); i { - 
case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sdk_logical_identity_proto_rawDesc, - NumEnums: 0, - NumMessages: 11, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_sdk_logical_identity_proto_goTypes, - DependencyIndexes: file_sdk_logical_identity_proto_depIDxs, - MessageInfos: file_sdk_logical_identity_proto_msgTypes, - }.Build() - File_sdk_logical_identity_proto = out.File - file_sdk_logical_identity_proto_rawDesc = nil - file_sdk_logical_identity_proto_goTypes = nil - file_sdk_logical_identity_proto_depIDxs = nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto deleted file mode 100644 index ea2e373b..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/hashicorp/vault/sdk/logical"; - -package logical; - -message Entity { - // ID is the unique identifier for the entity - string ID = 1; - - // Name is the human-friendly unique identifier for the entity - string name = 2; - - // Aliases contains thhe alias mappings for the given entity - repeated Alias aliases = 3; - - // Metadata represents the custom data tied to this entity - map metadata = 4; - - // Disabled is true if the entity is disabled. - bool disabled = 5; - - // NamespaceID is the identifier of the namespace to which this entity - // belongs to. - string namespace_id = 6; -} - -message Alias { - // MountType is the backend mount's type to which this identity belongs - string mount_type = 1; - - // MountAccessor is the identifier of the mount entry to which this - // identity belongs - string mount_accessor = 2; - - // Name is the identifier of this identity in its authentication source - string name = 3; - - // Metadata represents the custom data tied to this alias. Fields added - // to it should have a low rate of change (or no change) because each - // change incurs a storage write, so quickly-changing fields can have - // a significant performance impact at scale. See the SDK's - // "aliasmetadata" package for a helper that eases and standardizes - // using this safely. - map metadata = 4; - - // ID is the unique identifier for the alias - string ID = 5; - - // NamespaceID is the identifier of the namespace to which this alias - // belongs. - string namespace_id = 6; - - // Custom Metadata represents the custom data tied to this alias - map custom_metadata = 7; - - // Local indicates if the alias only belongs to the cluster where it was - // created. If true, the alias will be stored in a location that are ignored - // by the performance replication subsystem. - bool local = 8; -} - -message Group { - // ID is the unique identifier for the group - string ID = 1; - - // Name is the human-friendly unique identifier for the group - string name = 2; - - // Metadata represents the custom data tied to this group - map metadata = 3; - - // NamespaceID is the identifier of the namespace to which this group - // belongs to. 
- string namespace_id = 4; -} - -message MFAMethodID { - string type = 1; - string id = 2; - bool uses_passcode = 3; -} - -message MFAConstraintAny { - repeated MFAMethodID any = 1; -} - -message MFARequirement { - string mfa_request_id = 1; - map mfa_constraints = 2; -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/lease.go b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go deleted file mode 100644 index 97bbe4f6..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/lease.go +++ /dev/null @@ -1,53 +0,0 @@ -package logical - -import ( - "time" -) - -// LeaseOptions is an embeddable struct to capture common lease -// settings between a Secret and Auth -type LeaseOptions struct { - // TTL is the duration that this secret is valid for. Vault - // will automatically revoke it after the duration. - TTL time.Duration `json:"lease"` - - // MaxTTL is the maximum duration that this secret is valid for. - MaxTTL time.Duration `json:"max_ttl"` - - // Renewable, if true, means that this secret can be renewed. - Renewable bool `json:"renewable"` - - // Increment will be the lease increment that the user requested. - // This is only available on a Renew operation and has no effect - // when returning a response. - Increment time.Duration `json:"-"` - - // IssueTime is the time of issue for the original lease. This is - // only available on Renew and Revoke operations and has no effect when returning - // a response. It can be used to enforce maximum lease periods by - // a logical backend. - IssueTime time.Time `json:"-"` -} - -// LeaseEnabled checks if leasing is enabled -func (l *LeaseOptions) LeaseEnabled() bool { - return l.TTL > 0 -} - -// LeaseTotal is the lease duration with a guard against a negative TTL -func (l *LeaseOptions) LeaseTotal() time.Duration { - if l.TTL <= 0 { - return 0 - } - - return l.TTL -} - -// ExpirationTime computes the time until expiration including the grace period -func (l *LeaseOptions) ExpirationTime() time.Time { - var expireTime time.Time - if l.LeaseEnabled() { - expireTime = time.Now().Add(l.LeaseTotal()) - } - return expireTime -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go deleted file mode 100644 index 60114895..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/logical.go +++ /dev/null @@ -1,156 +0,0 @@ -package logical - -import ( - "context" - - log "github.com/hashicorp/go-hclog" -) - -// BackendType is the type of backend that is being implemented -type BackendType uint32 - -// The these are the types of backends that can be derived from -// logical.Backend -const ( - TypeUnknown BackendType = 0 // This is also the zero-value for BackendType - TypeLogical BackendType = 1 - TypeCredential BackendType = 2 -) - -// Stringer implementation -func (b BackendType) String() string { - switch b { - case TypeLogical: - return "secret" - case TypeCredential: - return "auth" - } - - return "unknown" -} - -// Backend interface must be implemented to be "mountable" at -// a given path. Requests flow through a router which has various mount -// points that flow to a logical backend. The logic of each backend is flexible, -// and this is what allows materialized keys to function. There can be specialized -// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can -// interact with remote APIs to generate keys dynamically. 
This interface also -// allows for a "procfs" like interaction, as internal state can be exposed by -// acting like a logical backend and being mounted. -type Backend interface { - // Initialize is used to initialize a plugin after it has been mounted. - Initialize(context.Context, *InitializationRequest) error - - // HandleRequest is used to handle a request and generate a response. - // The backends must check the operation type and handle appropriately. - HandleRequest(context.Context, *Request) (*Response, error) - - // SpecialPaths is a list of paths that are special in some way. - // See PathType for the types of special paths. The key is the type - // of the special path, and the value is a list of paths for this type. - // This is not a regular expression but is an exact match. If the path - // ends in '*' then it is a prefix-based match. The '*' can only appear - // at the end. - SpecialPaths() *Paths - - // System provides an interface to access certain system configuration - // information, such as globally configured default and max lease TTLs. - System() SystemView - - // Logger provides an interface to access the underlying logger. This - // is useful when a struct embeds a Backend-implemented struct that - // contains a private instance of logger. - Logger() log.Logger - - // HandleExistenceCheck is used to handle a request and generate a response - // indicating whether the given path exists or not; this is used to - // understand whether the request must have a Create or Update capability - // ACL applied. The first bool indicates whether an existence check - // function was found for the backend; the second indicates whether, if an - // existence check function was found, the item exists or not. - HandleExistenceCheck(context.Context, *Request) (bool, bool, error) - - // Cleanup is invoked during an unmount of a backend to allow it to - // handle any cleanup like connection closing or releasing of file handles. - Cleanup(context.Context) - - // InvalidateKey may be invoked when an object is modified that belongs - // to the backend. The backend can use this to clear any caches or reset - // internal state as needed. - InvalidateKey(context.Context, string) - - // Setup is used to set up the backend based on the provided backend - // configuration. - Setup(context.Context, *BackendConfig) error - - // Type returns the BackendType for the particular backend - Type() BackendType -} - -// BackendConfig is provided to the factory to initialize the backend -type BackendConfig struct { - // View should not be stored, and should only be used for initialization - StorageView Storage - - // The backend should use this logger. The log should not contain any secrets. - Logger log.Logger - - // System provides a view into a subset of safe system information that - // is useful for backends, such as the default/max lease TTLs - System SystemView - - // BackendUUID is a unique identifier provided to this backend. It's useful - // when a backend needs a consistent and unique string without using storage. - BackendUUID string - - // Config is the opaque user configuration provided when mounting - Config map[string]string -} - -// Factory is the factory function to create a logical backend. -type Factory func(context.Context, *BackendConfig) (Backend, error) - -// Paths is the structure of special paths that is used for SpecialPaths. 
-type Paths struct { - // Root are the API paths that require a root token to access - Root []string - - // Unauthenticated are the API paths that can be accessed without any auth. - // These can't be regular expressions, it is either exact match, a prefix - // match and/or a wildcard match. For prefix match, append '*' as a suffix. - // For a wildcard match, use '+' in the segment to match any identifier - // (e.g. 'foo/+/bar'). Note that '+' can't be adjacent to a non-slash. - Unauthenticated []string - - // LocalStorage are storage paths (prefixes) that are local to this cluster; - // this indicates that these paths should not be replicated across performance clusters - // (DR replication is unaffected). - LocalStorage []string - - // SealWrapStorage are storage paths that, when using a capable seal, - // should be seal wrapped with extra encryption. It is exact matching - // unless it ends with '/' in which case it will be treated as a prefix. - SealWrapStorage []string -} - -type Auditor interface { - AuditRequest(ctx context.Context, input *LogInput) error - AuditResponse(ctx context.Context, input *LogInput) error -} - -// Externaler allows us to check if a backend is running externally (i.e., over GRPC) -type Externaler interface { - IsExternal() bool -} - -type PluginVersion struct { - Version string -} - -// PluginVersioner is an optional interface to return version info. -type PluginVersioner interface { - // PluginVersion returns the version for the backend - PluginVersion() PluginVersion -} - -var EmptyPluginVersion = PluginVersion{""} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go deleted file mode 100644 index 16b85cd7..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go +++ /dev/null @@ -1,52 +0,0 @@ -package logical - -import ( - "context" - - "github.com/hashicorp/vault/sdk/physical" -) - -type LogicalStorage struct { - underlying physical.Backend -} - -func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { - entry, err := s.underlying.Get(ctx, key) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - return &StorageEntry{ - Key: entry.Key, - Value: entry.Value, - SealWrap: entry.SealWrap, - }, nil -} - -func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error { - return s.underlying.Put(ctx, &physical.Entry{ - Key: entry.Key, - Value: entry.Value, - SealWrap: entry.SealWrap, - }) -} - -func (s *LogicalStorage) Delete(ctx context.Context, key string) error { - return s.underlying.Delete(ctx, key) -} - -func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) { - return s.underlying.List(ctx, prefix) -} - -func (s *LogicalStorage) Underlying() physical.Backend { - return s.underlying -} - -func NewLogicalStorage(underlying physical.Backend) *LogicalStorage { - return &LogicalStorage{ - underlying: underlying, - } -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go b/vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go deleted file mode 100644 index e892c9cc..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/managed_key.go +++ /dev/null @@ -1,97 +0,0 @@ -package logical - -import ( - "context" - "crypto" - "crypto/cipher" - "io" -) - -type KeyUsage int - -const ( - KeyUsageEncrypt KeyUsage = 1 + iota - KeyUsageDecrypt - KeyUsageSign - KeyUsageVerify - KeyUsageWrap - KeyUsageUnwrap -) - -type 
ManagedKey interface { - // Name is a human-readable identifier for a managed key that may change/renamed. Use Uuid if a - // long term consistent identifier is needed. - Name() string - // UUID is a unique identifier for a managed key that is guaranteed to remain - // consistent even if a key is migrated or renamed. - UUID() string - // Present returns true if the key is established in the KMS. This may return false if for example - // an HSM library is not configured on all cluster nodes. - Present(ctx context.Context) (bool, error) - - // AllowsAll returns true if all the requested usages are supported by the managed key. - AllowsAll(usages []KeyUsage) bool -} - -type ( - ManagedKeyConsumer func(context.Context, ManagedKey) error - ManagedSigningKeyConsumer func(context.Context, ManagedSigningKey) error - ManagedEncryptingKeyConsumer func(context.Context, ManagedEncryptingKey) error -) - -type ManagedKeySystemView interface { - // WithManagedKeyByName retrieves an instantiated managed key for consumption by the given function. The - // provided key can only be used within the scope of that function call - WithManagedKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedKeyConsumer) error - // WithManagedKeyByUUID retrieves an instantiated managed key for consumption by the given function. The - // provided key can only be used within the scope of that function call - WithManagedKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedKeyConsumer) error - - // WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function, - // with the same semantics as WithManagedKeyByName - WithManagedSigningKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedSigningKeyConsumer) error - // WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function, - // with the same semantics as WithManagedKeyByUUID - WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error - // WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function, - // with the same semantics as WithManagedKeyByName - WithManagedEncryptingKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedEncryptingKeyConsumer) error - // WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function, - // with the same semantics as WithManagedKeyByUUID - WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error -} - -type ManagedAsymmetricKey interface { - ManagedKey - GetPublicKey(ctx context.Context) (crypto.PublicKey, error) -} - -type ManagedKeyLifecycle interface { - // GenerateKey generates a key in the KMS if it didn't yet exist, returning the id. - // If it already existed, returns the existing id. KMSKey's key material is ignored if present. - GenerateKey(ctx context.Context) (string, error) -} - -type ManagedSigningKey interface { - ManagedAsymmetricKey - - // Sign returns a digital signature of the provided value. The SignerOpts param must provide the hash function - // that generated the value (if any). 
- // The optional randomSource specifies the source of random values and may be ignored by the implementation - // (such as on HSMs with their own internal RNG) - Sign(ctx context.Context, value []byte, randomSource io.Reader, opts crypto.SignerOpts) ([]byte, error) - - // Verify verifies the provided signature against the value. The SignerOpts param must provide the hash function - // that generated the value (if any). - // If true is returned the signature is correct, false otherwise. - Verify(ctx context.Context, signature, value []byte, opts crypto.SignerOpts) (bool, error) - - // GetSigner returns an implementation of crypto.Signer backed by the managed key. This should be called - // as needed so as to use per request contexts. - GetSigner(context.Context) (crypto.Signer, error) -} - -type ManagedEncryptingKey interface { - ManagedKey - GetAEAD(iv []byte) (cipher.AEAD, error) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go deleted file mode 100644 index 9be723e1..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 -// source: sdk/logical/plugin.proto - -package logical - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type PluginEnvironment struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // VaultVersion is the version of the Vault server - VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"` - // VaultVersionPrerelease is the prerelease information of the Vault server - VaultVersionPrerelease string `protobuf:"bytes,2,opt,name=vault_version_prerelease,json=vaultVersionPrerelease,proto3" json:"vault_version_prerelease,omitempty"` - // VaultVersionMetadata is the version metadata of the Vault server - VaultVersionMetadata string `protobuf:"bytes,3,opt,name=vault_version_metadata,json=vaultVersionMetadata,proto3" json:"vault_version_metadata,omitempty"` -} - -func (x *PluginEnvironment) Reset() { - *x = PluginEnvironment{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_plugin_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PluginEnvironment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PluginEnvironment) ProtoMessage() {} - -func (x *PluginEnvironment) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_plugin_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PluginEnvironment.ProtoReflect.Descriptor instead. 
-func (*PluginEnvironment) Descriptor() ([]byte, []int) { - return file_sdk_logical_plugin_proto_rawDescGZIP(), []int{0} -} - -func (x *PluginEnvironment) GetVaultVersion() string { - if x != nil { - return x.VaultVersion - } - return "" -} - -func (x *PluginEnvironment) GetVaultVersionPrerelease() string { - if x != nil { - return x.VaultVersionPrerelease - } - return "" -} - -func (x *PluginEnvironment) GetVaultVersionMetadata() string { - if x != nil { - return x.VaultVersionMetadata - } - return "" -} - -var File_sdk_logical_plugin_proto protoreflect.FileDescriptor - -var file_sdk_logical_plugin_proto_rawDesc = []byte{ - 0x0a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, - 0x63, 0x61, 0x6c, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, - 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, - 0x0a, 0x18, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x16, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x76, 0x61, 0x75, 0x6c, - 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x28, - 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, - 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sdk_logical_plugin_proto_rawDescOnce sync.Once - file_sdk_logical_plugin_proto_rawDescData = file_sdk_logical_plugin_proto_rawDesc -) - -func file_sdk_logical_plugin_proto_rawDescGZIP() []byte { - file_sdk_logical_plugin_proto_rawDescOnce.Do(func() { - file_sdk_logical_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_plugin_proto_rawDescData) - }) - return file_sdk_logical_plugin_proto_rawDescData -} - -var file_sdk_logical_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_sdk_logical_plugin_proto_goTypes = []interface{}{ - (*PluginEnvironment)(nil), // 0: logical.PluginEnvironment -} -var file_sdk_logical_plugin_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_sdk_logical_plugin_proto_init() } -func file_sdk_logical_plugin_proto_init() { - if File_sdk_logical_plugin_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PluginEnvironment); i { - case 0: - return &v.state - case 1: - return &v.sizeCache 
- case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sdk_logical_plugin_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_sdk_logical_plugin_proto_goTypes, - DependencyIndexes: file_sdk_logical_plugin_proto_depIdxs, - MessageInfos: file_sdk_logical_plugin_proto_msgTypes, - }.Build() - File_sdk_logical_plugin_proto = out.File - file_sdk_logical_plugin_proto_rawDesc = nil - file_sdk_logical_plugin_proto_goTypes = nil - file_sdk_logical_plugin_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto deleted file mode 100644 index f2df6c75..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/hashicorp/vault/sdk/logical"; - -package logical; - -message PluginEnvironment { - // VaultVersion is the version of the Vault server - string vault_version = 1; - - // VaultVersionPrerelease is the prerelease information of the Vault server - string vault_version_prerelease = 2; - - // VaultVersionMetadata is the version metadata of the Vault server - string vault_version_metadata = 3; -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/request.go b/vendor/github.com/hashicorp/vault/sdk/logical/request.go deleted file mode 100644 index d774fd17..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/request.go +++ /dev/null @@ -1,394 +0,0 @@ -package logical - -import ( - "context" - "fmt" - "net/http" - "strings" - "time" - - "github.com/mitchellh/copystructure" -) - -// RequestWrapInfo is a struct that stores information about desired response -// and seal wrapping behavior -type RequestWrapInfo struct { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` - - // The format to use for the wrapped response; if not specified it's a bare - // token - Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` - - // A flag to conforming backends that data for a given request should be - // seal wrapped - SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` -} - -func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) { - if r == nil { - return nil, nil - } - switch key { - case "ttl": - return r.TTL, nil - case "ttl_seconds": - return int64(r.TTL.Seconds()), nil - } - - return nil, nil -} - -func (r *RequestWrapInfo) SentinelKeys() []string { - return []string{ - "ttl", - "ttl_seconds", - } -} - -type ClientTokenSource uint32 - -const ( - NoClientToken ClientTokenSource = iota - ClientTokenFromVaultHeader - ClientTokenFromAuthzHeader -) - -type WALState struct { - ClusterID string - LocalIndex uint64 - ReplicatedIndex uint64 -} - -const indexStateCtxKey = "index_state" - -// IndexStateContext returns a context with an added value holding the index -// state that should be populated on writes. -func IndexStateContext(ctx context.Context, state *WALState) context.Context { - return context.WithValue(ctx, indexStateCtxKey, state) -} - -// IndexStateFromContext is a helper to look up if the provided context contains -// an index state pointer. 
-func IndexStateFromContext(ctx context.Context) *WALState { - s, ok := ctx.Value(indexStateCtxKey).(*WALState) - if !ok { - return nil - } - return s -} - -// Request is a struct that stores the parameters and context of a request -// being made to Vault. It is used to abstract the details of the higher level -// request protocol from the handlers. -// -// Note: Many of these have Sentinel disabled because they are values populated -// by the router after policy checks; the token namespace would be the right -// place to access them via Sentinel -type Request struct { - // Id is the uuid associated with each request - ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""` - - // If set, the name given to the replication secondary where this request - // originated - ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""` - - // Operation is the requested operation type - Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"` - - // Path is the full path of the request - Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""` - - // Request data is an opaque map that must have string keys. - Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"` - - // Storage can be used to durably store and retrieve state. - Storage Storage `json:"-" sentinel:""` - - // Secret will be non-nil only for Revoke and Renew operations - // to represent the secret that was returned prior. - Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""` - - // Auth will be non-nil only for Renew operations - // to represent the auth that was returned prior. - Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""` - - // Headers will contain the http headers from the request. This value will - // be used in the audit broker to ensure we are auditing only the allowed - // headers. - Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""` - - // Connection will be non-nil only for credential providers to - // inspect the connection information and potentially use it for - // authentication/protection. - Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"` - - // ClientToken is provided to the core so that the identity - // can be verified and ACLs applied. This value is passed - // through to the logical backends but after being salted and - // hashed. - ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""` - - // ClientTokenAccessor is provided to the core so that the it can get - // logged as part of request audit logging. - ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""` - - // DisplayName is provided to the logical backend to help associate - // dynamic secrets with the source entity. This is not a sensitive - // name, but is useful for operators. - DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""` - - // MountPoint is provided so that a logical backend can generate - // paths relative to itself. The `Path` is effectively the client - // request path with the MountPoint trimmed off. 
- MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""` - - // MountType is provided so that a logical backend can make decisions - // based on the specific mount type (e.g., if a mount type has different - // aliases, generating different defaults depending on the alias) - MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""` - - // MountAccessor is provided so that identities returned by the authentication - // backends can be tied to the mount it belongs to. - MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""` - - // WrapInfo contains requested response wrapping parameters - WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""` - - // ClientTokenRemainingUses represents the allowed number of uses left on the - // token supplied - ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"` - - // EntityID is the identity of the caller extracted out of the token used - // to make this request - EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""` - - // PolicyOverride indicates that the requestor wishes to override - // soft-mandatory Sentinel policies - PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"` - - // Whether the request is unauthenticated, as in, had no client token - // attached. Useful in some situations where the client token is not made - // accessible. - Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"` - - // MFACreds holds the parsed MFA information supplied over the API as part of - // X-Vault-MFA header - MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""` - - // Cached token entry. This avoids another lookup in request handling when - // we've already looked it up at http handling time. Note that this token - // has not been "used", as in it will not properly take into account use - // count limitations. As a result this field should only ever be used for - // transport to a function that would otherwise do a lookup and then - // properly use the token. - tokenEntry *TokenEntry - - // For replication, contains the last WAL on the remote side after handling - // the request, used for best-effort avoidance of stale read-after-write - lastRemoteWAL uint64 - - // ControlGroup holds the authorizations that have happened on this - // request - ControlGroup *ControlGroup `json:"control_group" structs:"control_group" mapstructure:"control_group" sentinel:""` - - // ClientTokenSource tells us where the client token was sourced from, so - // we can delete it before sending off to plugins - ClientTokenSource ClientTokenSource - - // HTTPRequest, if set, can be used to access fields from the HTTP request - // that generated this logical.Request object, such as the request body. - HTTPRequest *http.Request `json:"-" sentinel:""` - - // ResponseWriter if set can be used to stream a response value to the http - // request that generated this logical.Request object. - ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""` - - // requiredState is used internally to propagate the X-Vault-Index request - // header to later levels of request processing that operate only on - // logical.Request. 
- requiredState []string - - // responseState is used internally to propagate the state that should appear - // in response headers; it's attached to the request rather than the response - // because not all requests yields non-nil responses. - responseState *WALState - - // ClientID is the identity of the caller. If the token is associated with an - // entity, it will be the same as the EntityID . If the token has no entity, - // this will be the sha256(sorted policies + namespace) associated with the - // client token. - ClientID string `json:"client_id" structs:"client_id" mapstructure:"client_id" sentinel:""` - - // InboundSSCToken is the token that arrives on an inbound request, supplied - // by the vault user. - InboundSSCToken string -} - -// Clone returns a deep copy of the request by using copystructure -func (r *Request) Clone() (*Request, error) { - cpy, err := copystructure.Copy(r) - if err != nil { - return nil, err - } - return cpy.(*Request), nil -} - -// Get returns a data field and guards for nil Data -func (r *Request) Get(key string) interface{} { - if r.Data == nil { - return nil - } - return r.Data[key] -} - -// GetString returns a data field as a string -func (r *Request) GetString(key string) string { - raw := r.Get(key) - s, _ := raw.(string) - return s -} - -func (r *Request) GoString() string { - return fmt.Sprintf("*%#v", *r) -} - -func (r *Request) SentinelGet(key string) (interface{}, error) { - switch key { - case "path": - // Sanitize it here so that it's consistent in policies - return strings.TrimPrefix(r.Path, "/"), nil - - case "wrapping", "wrap_info": - // If the pointer is nil accessing the wrap info is considered - // "undefined" so this allows us to instead discover a TTL of zero - if r.WrapInfo == nil { - return &RequestWrapInfo{}, nil - } - return r.WrapInfo, nil - } - - return nil, nil -} - -func (r *Request) SentinelKeys() []string { - return []string{ - "path", - "wrapping", - "wrap_info", - } -} - -func (r *Request) LastRemoteWAL() uint64 { - return r.lastRemoteWAL -} - -func (r *Request) SetLastRemoteWAL(last uint64) { - r.lastRemoteWAL = last -} - -func (r *Request) RequiredState() []string { - return r.requiredState -} - -func (r *Request) SetRequiredState(state []string) { - r.requiredState = state -} - -func (r *Request) ResponseState() *WALState { - return r.responseState -} - -func (r *Request) SetResponseState(w *WALState) { - r.responseState = w -} - -func (r *Request) TokenEntry() *TokenEntry { - return r.tokenEntry -} - -func (r *Request) SetTokenEntry(te *TokenEntry) { - r.tokenEntry = te -} - -// RenewRequest creates the structure of the renew request. -func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request { - return &Request{ - Operation: RenewOperation, - Path: path, - Data: data, - Secret: secret, - } -} - -// RenewAuthRequest creates the structure of the renew request for an auth. -func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request { - return &Request{ - Operation: RenewOperation, - Path: path, - Data: data, - Auth: auth, - } -} - -// RevokeRequest creates the structure of the revoke request. -func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request { - return &Request{ - Operation: RevokeOperation, - Path: path, - Data: data, - Secret: secret, - } -} - -// RollbackRequest creates the structure of the revoke request. 
-func RollbackRequest(path string) *Request { - return &Request{ - Operation: RollbackOperation, - Path: path, - Data: make(map[string]interface{}), - } -} - -// Operation is an enum that is used to specify the type -// of request being made -type Operation string - -const ( - // The operations below are called per path - CreateOperation Operation = "create" - ReadOperation = "read" - UpdateOperation = "update" - PatchOperation = "patch" - DeleteOperation = "delete" - ListOperation = "list" - HelpOperation = "help" - AliasLookaheadOperation = "alias-lookahead" - ResolveRoleOperation = "resolve-role" - - // The operations below are called globally, the path is less relevant. - RevokeOperation Operation = "revoke" - RenewOperation = "renew" - RollbackOperation = "rollback" -) - -type MFACreds map[string][]string - -// InitializationRequest stores the parameters and context of an Initialize() -// call being made to a logical.Backend. -type InitializationRequest struct { - // Storage can be used to durably store and retrieve state. - Storage Storage -} - -type CustomHeader struct { - Name string - Value string -} - -type CtxKeyInFlightRequestID struct{} - -func (c CtxKeyInFlightRequestID) String() string { - return "in-flight-request-ID" -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response.go b/vendor/github.com/hashicorp/vault/sdk/logical/response.go deleted file mode 100644 index 0f8a2210..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/response.go +++ /dev/null @@ -1,322 +0,0 @@ -package logical - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "strconv" - "sync/atomic" - - "github.com/hashicorp/vault/sdk/helper/wrapping" -) - -const ( - // HTTPContentType can be specified in the Data field of a Response - // so that the HTTP front end can specify a custom Content-Type associated - // with the HTTPRawBody. This can only be used for non-secrets, and should - // be avoided unless absolutely necessary, such as implementing a specification. - // The value must be a string. - HTTPContentType = "http_content_type" - - // HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType. - // This can only be specified for non-secrets, and should should be similarly - // avoided like the HTTPContentType. The value must be a byte slice. - HTTPRawBody = "http_raw_body" - - // HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType. - // This can only be specified for non-secrets, and should should be similarly - // avoided like the HTTPContentType. The value must be an integer. - HTTPStatusCode = "http_status_code" - - // For unwrapping we may need to know whether the value contained in the - // raw body is already JSON-unmarshaled. The presence of this key indicates - // that it has already been unmarshaled. That way we don't need to simply - // ignore errors. - HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded" - - // If set, HTTPCacheControlHeader will replace the default Cache-Control=no-store header - // set by the generic wrapping handler. The value must be a string. - HTTPCacheControlHeader = "http_raw_cache_control" - - // If set, HTTPPragmaHeader will set the Pragma response header. - // The value must be a string. - HTTPPragmaHeader = "http_raw_pragma" - - // If set, HTTPWWWAuthenticateHeader will set the WWW-Authenticate response header. - // The value must be a string. 
- HTTPWWWAuthenticateHeader = "http_www_authenticate" -) - -// Response is a struct that stores the response of a request. -// It is used to abstract the details of the higher level request protocol. -type Response struct { - // Secret, if not nil, denotes that this response represents a secret. - Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"` - - // Auth, if not nil, contains the authentication information for - // this response. This is only checked and means something for - // credential backends. - Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"` - - // Response data is an opaque map that must have string keys. For - // secrets, this data is sent down to the user as-is. To store internal - // data that you don't want the user to see, store it in - // Secret.InternalData. - Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"` - - // Redirect is an HTTP URL to redirect to for further authentication. - // This is only valid for credential backends. This will be blanked - // for any logical backend and ignored. - Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"` - - // Warnings allow operations or backends to return warnings in response - // to user actions without failing the action outright. - Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"` - - // Information for wrapping the response in a cubbyhole - WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"` - - // Headers will contain the http headers from the plugin that it wishes to - // have as part of the output - Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"` -} - -// AddWarning adds a warning into the response's warning list -func (r *Response) AddWarning(warning string) { - if r.Warnings == nil { - r.Warnings = make([]string, 0, 1) - } - r.Warnings = append(r.Warnings, warning) -} - -// IsError returns true if this response seems to indicate an error. -func (r *Response) IsError() bool { - // If the response data contains only an 'error' element, or an 'error' and a 'data' element only - return r != nil && r.Data != nil && r.Data["error"] != nil && (len(r.Data) == 1 || (r.Data["data"] != nil && len(r.Data) == 2)) -} - -func (r *Response) Error() error { - if !r.IsError() { - return nil - } - switch r.Data["error"].(type) { - case string: - return errors.New(r.Data["error"].(string)) - case error: - return r.Data["error"].(error) - } - return nil -} - -// HelpResponse is used to format a help response -func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response { - return &Response{ - Data: map[string]interface{}{ - "help": text, - "see_also": seeAlso, - "openapi": oapiDoc, - }, - } -} - -// ErrorResponse is used to format an error response -func ErrorResponse(text string, vargs ...interface{}) *Response { - if len(vargs) > 0 { - text = fmt.Sprintf(text, vargs...) - } - return &Response{ - Data: map[string]interface{}{ - "error": text, - }, - } -} - -// ListResponse is used to format a response to a list operation. -func ListResponse(keys []string) *Response { - resp := &Response{ - Data: map[string]interface{}{}, - } - if len(keys) != 0 { - resp.Data["keys"] = keys - } - return resp -} - -// ListResponseWithInfo is used to format a response to a list operation and -// return the keys as well as a map with corresponding key info. 
-func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response { - resp := ListResponse(keys) - - keyInfoData := make(map[string]interface{}) - for _, key := range keys { - val, ok := keyInfo[key] - if ok { - keyInfoData[key] = val - } - } - - if len(keyInfoData) > 0 { - resp.Data["key_info"] = keyInfoData - } - - return resp -} - -// RespondWithStatusCode takes a response and converts it to a raw response with -// the provided Status Code. -func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) { - ret := &Response{ - Data: map[string]interface{}{ - HTTPContentType: "application/json", - HTTPStatusCode: code, - }, - } - - if resp != nil { - httpResp := LogicalResponseToHTTPResponse(resp) - - if req != nil { - httpResp.RequestID = req.ID - } - - body, err := json.Marshal(httpResp) - if err != nil { - return nil, err - } - - // We default to string here so that the value is HMAC'd via audit. - // Since this function is always marshaling to JSON, this is - // appropriate. - ret.Data[HTTPRawBody] = string(body) - } - - return ret, nil -} - -// HTTPResponseWriter is optionally added to a request object and can be used to -// write directly to the HTTP response writer. -type HTTPResponseWriter struct { - http.ResponseWriter - written *uint32 -} - -// NewHTTPResponseWriter creates a new HTTPResponseWriter object that wraps the -// provided io.Writer. -func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter { - return &HTTPResponseWriter{ - ResponseWriter: w, - written: new(uint32), - } -} - -// Write will write the bytes to the underlying io.Writer. -func (w *HTTPResponseWriter) Write(bytes []byte) (int, error) { - atomic.StoreUint32(w.written, 1) - return w.ResponseWriter.Write(bytes) -} - -// Written tells us if the writer has been written to yet. -func (w *HTTPResponseWriter) Written() bool { - return atomic.LoadUint32(w.written) == 1 -} - -type WrappingResponseWriter interface { - http.ResponseWriter - Wrapped() http.ResponseWriter -} - -type StatusHeaderResponseWriter struct { - wrapped http.ResponseWriter - wroteHeader bool - StatusCode int - headers map[string][]*CustomHeader -} - -func NewStatusHeaderResponseWriter(w http.ResponseWriter, h map[string][]*CustomHeader) *StatusHeaderResponseWriter { - return &StatusHeaderResponseWriter{ - wrapped: w, - wroteHeader: false, - StatusCode: 200, - headers: h, - } -} - -func (w *StatusHeaderResponseWriter) Wrapped() http.ResponseWriter { - return w.wrapped -} - -func (w *StatusHeaderResponseWriter) Header() http.Header { - return w.wrapped.Header() -} - -func (w *StatusHeaderResponseWriter) Write(buf []byte) (int, error) { - // It is allowed to only call ResponseWriter.Write and skip - // ResponseWriter.WriteHeader. An example of such a situation is - // "handleUIStub". The Write function will internally set the status code - // 200 for the response for which that call might invoke other - // implementations of the WriteHeader function. So, we still need to set - // the custom headers. In cases where both WriteHeader and Write of - // statusHeaderResponseWriter struct are called the internal call to the - // WriterHeader invoked from inside Write method won't change the headers. 
- if !w.wroteHeader { - w.setCustomResponseHeaders(w.StatusCode) - } - - return w.wrapped.Write(buf) -} - -func (w *StatusHeaderResponseWriter) WriteHeader(statusCode int) { - w.setCustomResponseHeaders(statusCode) - w.wrapped.WriteHeader(statusCode) - w.StatusCode = statusCode - // in cases where Write is called after WriteHeader, let's prevent setting - // ResponseWriter headers twice - w.wroteHeader = true -} - -func (w *StatusHeaderResponseWriter) setCustomResponseHeaders(status int) { - sch := w.headers - if sch == nil { - return - } - - // Checking the validity of the status code - if status >= 600 || status < 100 { - return - } - - // setter function to set the headers - setter := func(hvl []*CustomHeader) { - for _, hv := range hvl { - w.Header().Set(hv.Name, hv.Value) - } - } - - // Setting the default headers first - setter(sch["default"]) - - // setting the Xyy pattern first - d := fmt.Sprintf("%vxx", status/100) - if val, ok := sch[d]; ok { - setter(val) - } - - // Setting the specific headers - if val, ok := sch[strconv.Itoa(status)]; ok { - setter(val) - } - - return -} - -var _ WrappingResponseWriter = &StatusHeaderResponseWriter{} - -// ResolveRoleResponse returns a standard response to be returned by functions handling a ResolveRoleOperation -func ResolveRoleResponse(roleName string) (*Response, error) { - return &Response{ - Data: map[string]interface{}{ - "role": roleName, - }, - }, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go deleted file mode 100644 index 4a9f61d5..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go +++ /dev/null @@ -1,204 +0,0 @@ -package logical - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - - "github.com/hashicorp/errwrap" - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/sdk/helper/consts" -) - -// RespondErrorCommon pulls most of the functionality from http's -// respondErrorCommon and some of http's handleLogical and makes it available -// to both the http package and elsewhere. -func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { - if err == nil && (resp == nil || !resp.IsError()) { - switch { - case req.Operation == ReadOperation: - if resp == nil { - return http.StatusNotFound, nil - } - - // Basically: if we have empty "keys" or no keys at all, 404. This - // provides consistency with GET. 
- case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil): - if resp == nil { - return http.StatusNotFound, nil - } - if len(resp.Data) == 0 { - if len(resp.Warnings) > 0 { - return 0, nil - } - return http.StatusNotFound, nil - } - keysRaw, ok := resp.Data["keys"] - if !ok || keysRaw == nil { - // If we don't have keys but have other data, return as-is - if len(resp.Data) > 0 || len(resp.Warnings) > 0 { - return 0, nil - } - return http.StatusNotFound, nil - } - - var keys []string - switch keysRaw.(type) { - case []interface{}: - keys = make([]string, len(keysRaw.([]interface{}))) - for i, el := range keysRaw.([]interface{}) { - s, ok := el.(string) - if !ok { - return http.StatusInternalServerError, nil - } - keys[i] = s - } - - case []string: - keys = keysRaw.([]string) - default: - return http.StatusInternalServerError, nil - } - - if len(keys) == 0 { - return http.StatusNotFound, nil - } - } - - return 0, nil - } - - if errwrap.ContainsType(err, new(ReplicationCodedError)) { - var allErrors error - var codedErr *ReplicationCodedError - errwrap.Walk(err, func(inErr error) { - newErr, ok := inErr.(*ReplicationCodedError) - if ok { - codedErr = newErr - } else { - allErrors = multierror.Append(allErrors, inErr) - } - }) - if allErrors != nil { - return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors) - } - return codedErr.Code, errors.New(codedErr.Msg) - } - - // Start out with internal server error since in most of these cases there - // won't be a response so this won't be overridden - statusCode := http.StatusInternalServerError - // If we actually have a response, start out with bad request - if resp != nil { - statusCode = http.StatusBadRequest - } - - // Now, check the error itself; if it has a specific logical error, set the - // appropriate code - if err != nil { - switch { - case errwrap.ContainsType(err, new(StatusBadRequest)): - statusCode = http.StatusBadRequest - case errwrap.Contains(err, ErrPermissionDenied.Error()): - statusCode = http.StatusForbidden - case errwrap.Contains(err, consts.ErrInvalidWrappingToken.Error()): - statusCode = http.StatusBadRequest - case errwrap.Contains(err, ErrUnsupportedOperation.Error()): - statusCode = http.StatusMethodNotAllowed - case errwrap.Contains(err, ErrUnsupportedPath.Error()): - statusCode = http.StatusNotFound - case errwrap.Contains(err, ErrInvalidRequest.Error()): - statusCode = http.StatusBadRequest - case errwrap.Contains(err, ErrUpstreamRateLimited.Error()): - statusCode = http.StatusBadGateway - case errwrap.Contains(err, ErrRateLimitQuotaExceeded.Error()): - statusCode = http.StatusTooManyRequests - case errwrap.Contains(err, ErrLeaseCountQuotaExceeded.Error()): - statusCode = http.StatusTooManyRequests - case errwrap.Contains(err, ErrMissingRequiredState.Error()): - statusCode = http.StatusPreconditionFailed - case errwrap.Contains(err, ErrPathFunctionalityRemoved.Error()): - statusCode = http.StatusNotFound - case errwrap.Contains(err, ErrRelativePath.Error()): - statusCode = http.StatusBadRequest - case errwrap.Contains(err, ErrInvalidCredentials.Error()): - statusCode = http.StatusBadRequest - } - } - - if resp != nil && resp.IsError() { - err = fmt.Errorf("%s", resp.Data["error"].(string)) - } - - return statusCode, err -} - -// AdjustErrorStatusCode adjusts the status that will be sent in error -// conditions in a way that can be shared across http's respondError and other -// locations. 
-func AdjustErrorStatusCode(status *int, err error) { - // Handle nested errors - if t, ok := err.(*multierror.Error); ok { - for _, e := range t.Errors { - AdjustErrorStatusCode(status, e) - } - } - - // Adjust status code when sealed - if errwrap.Contains(err, consts.ErrSealed.Error()) { - *status = http.StatusServiceUnavailable - } - - if errwrap.Contains(err, consts.ErrAPILocked.Error()) { - *status = http.StatusServiceUnavailable - } - - // Adjust status code on - if errwrap.Contains(err, "http: request body too large") { - *status = http.StatusRequestEntityTooLarge - } - - // Allow HTTPCoded error passthrough to specify a code - if t, ok := err.(HTTPCodedError); ok { - *status = t.Code() - } -} - -func RespondError(w http.ResponseWriter, status int, err error) { - AdjustErrorStatusCode(&status, err) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(status) - - type ErrorResponse struct { - Errors []string `json:"errors"` - } - resp := &ErrorResponse{Errors: make([]string, 0, 1)} - if err != nil { - resp.Errors = append(resp.Errors, err.Error()) - } - - enc := json.NewEncoder(w) - enc.Encode(resp) -} - -func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) { - AdjustErrorStatusCode(&status, err) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(status) - - type ErrorAndDataResponse struct { - Errors []string `json:"errors"` - Data interface{} `json:"data""` - } - resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)} - if err != nil { - resp.Errors = append(resp.Errors, err.Error()) - } - resp.Data = data - - enc := json.NewEncoder(w) - enc.Encode(resp) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/secret.go b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go deleted file mode 100644 index a2128d86..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/secret.go +++ /dev/null @@ -1,30 +0,0 @@ -package logical - -import "fmt" - -// Secret represents the secret part of a response. -type Secret struct { - LeaseOptions - - // InternalData is JSON-encodable data that is stored with the secret. - // This will be sent back during a Renew/Revoke for storing internal data - // used for those operations. - InternalData map[string]interface{} `json:"internal_data" sentinel:""` - - // LeaseID is the ID returned to the user to manage this secret. - // This is generated by Vault core. Any set value will be ignored. - // For requests, this will always be blank. - LeaseID string `sentinel:""` -} - -func (s *Secret) Validate() error { - if s.TTL < 0 { - return fmt.Errorf("ttl duration must not be less than zero") - } - - return nil -} - -func (s *Secret) GoString() string { - return fmt.Sprintf("*%#v", *s) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go deleted file mode 100644 index 0802ad01..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/storage.go +++ /dev/null @@ -1,158 +0,0 @@ -package logical - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/jsonutil" -) - -// ErrReadOnly is returned when a backend does not support -// writing. This can be caused by a read-only replica or secondary -// cluster operation. 
-var ErrReadOnly = errors.New("cannot write to readonly storage") - -// ErrSetupReadOnly is returned when a write operation is attempted on a -// storage while the backend is still being setup. -var ErrSetupReadOnly = errors.New("cannot write to storage during setup") - -// Storage is the way that logical backends are able read/write data. -type Storage interface { - List(context.Context, string) ([]string, error) - Get(context.Context, string) (*StorageEntry, error) - Put(context.Context, *StorageEntry) error - Delete(context.Context, string) error -} - -// StorageEntry is the entry for an item in a Storage implementation. -type StorageEntry struct { - Key string - Value []byte - SealWrap bool -} - -// DecodeJSON decodes the 'Value' present in StorageEntry. -func (e *StorageEntry) DecodeJSON(out interface{}) error { - return jsonutil.DecodeJSON(e.Value, out) -} - -// StorageEntryJSON creates a StorageEntry with a JSON-encoded value. -func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) { - encodedBytes, err := jsonutil.EncodeJSON(v) - if err != nil { - return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err) - } - - return &StorageEntry{ - Key: k, - Value: encodedBytes, - }, nil -} - -type ClearableView interface { - List(context.Context, string) ([]string, error) - Delete(context.Context, string) error -} - -// ScanView is used to scan all the keys in a view iteratively -func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error { - frontier := []string{""} - for len(frontier) > 0 { - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // List the contents - contents, err := view.List(ctx, current) - if err != nil { - return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err) - } - - // Handle the contents in the directory - for _, c := range contents { - // Exit if the context has been canceled - if ctx.Err() != nil { - return ctx.Err() - } - fullPath := current + c - if strings.HasSuffix(c, "/") { - frontier = append(frontier, fullPath) - } else { - cb(fullPath) - } - } - } - return nil -} - -// CollectKeys is used to collect all the keys in a view -func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) { - return CollectKeysWithPrefix(ctx, view, "") -} - -// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string -func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) { - var keys []string - - cb := func(path string) { - if strings.HasPrefix(path, prefix) { - keys = append(keys, path) - } - } - - // Scan for all the keys - if err := ScanView(ctx, view, cb); err != nil { - return nil, err - } - return keys, nil -} - -// ClearView is used to delete all the keys in a view -func ClearView(ctx context.Context, view ClearableView) error { - return ClearViewWithLogging(ctx, view, nil) -} - -func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error { - if view == nil { - return nil - } - - if logger == nil { - logger = hclog.NewNullLogger() - } - - // Collect all the keys - keys, err := CollectKeys(ctx, view) - if err != nil { - return err - } - - logger.Debug("clearing view", "total_keys", len(keys)) - - // Delete all the keys - var pctDone int - for idx, key := range keys { - // Rather than keep trying to do stuff with a canceled context, bail; - // storage will fail anyways - if ctx.Err() != nil { - return ctx.Err() - } - if err := view.Delete(ctx, 
key); err != nil { - return err - } - - newPctDone := idx * 100.0 / len(keys) - if int(newPctDone) > pctDone { - pctDone = int(newPctDone) - logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx) - } - } - - logger.Debug("view cleared") - - return nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go deleted file mode 100644 index 65368a07..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go +++ /dev/null @@ -1,87 +0,0 @@ -package logical - -import ( - "context" - "sync" - - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/sdk/physical/inmem" -) - -// InmemStorage implements Storage and stores all data in memory. It is -// basically a straight copy of physical.Inmem, but it prevents backends from -// having to load all of physical's dependencies (which are legion) just to -// have some testing storage. -type InmemStorage struct { - underlying physical.Backend - once sync.Once -} - -func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { - s.once.Do(s.init) - - entry, err := s.underlying.Get(ctx, key) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - return &StorageEntry{ - Key: entry.Key, - Value: entry.Value, - SealWrap: entry.SealWrap, - }, nil -} - -func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error { - s.once.Do(s.init) - - return s.underlying.Put(ctx, &physical.Entry{ - Key: entry.Key, - Value: entry.Value, - SealWrap: entry.SealWrap, - }) -} - -func (s *InmemStorage) Delete(ctx context.Context, key string) error { - s.once.Do(s.init) - - return s.underlying.Delete(ctx, key) -} - -func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) { - s.once.Do(s.init) - - return s.underlying.List(ctx, prefix) -} - -func (s *InmemStorage) Underlying() *inmem.InmemBackend { - s.once.Do(s.init) - - return s.underlying.(*inmem.InmemBackend) -} - -func (s *InmemStorage) FailPut(fail bool) *InmemStorage { - s.Underlying().FailPut(fail) - return s -} - -func (s *InmemStorage) FailGet(fail bool) *InmemStorage { - s.Underlying().FailGet(fail) - return s -} - -func (s *InmemStorage) FailDelete(fail bool) *InmemStorage { - s.Underlying().FailDelete(fail) - return s -} - -func (s *InmemStorage) FailList(fail bool) *InmemStorage { - s.Underlying().FailList(fail) - return s -} - -func (s *InmemStorage) init() { - s.underlying, _ = inmem.NewInmem(nil, nil) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go deleted file mode 100644 index 2cd07715..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go +++ /dev/null @@ -1,110 +0,0 @@ -package logical - -import ( - "context" - "errors" - "strings" -) - -type StorageView struct { - storage Storage - prefix string -} - -var ErrRelativePath = errors.New("relative paths not supported") - -func NewStorageView(storage Storage, prefix string) *StorageView { - return &StorageView{ - storage: storage, - prefix: prefix, - } -} - -// logical.Storage impl. -func (s *StorageView) List(ctx context.Context, prefix string) ([]string, error) { - if err := s.SanityCheck(prefix); err != nil { - return nil, err - } - return s.storage.List(ctx, s.ExpandKey(prefix)) -} - -// logical.Storage impl. 
-func (s *StorageView) Get(ctx context.Context, key string) (*StorageEntry, error) { - if err := s.SanityCheck(key); err != nil { - return nil, err - } - entry, err := s.storage.Get(ctx, s.ExpandKey(key)) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - entry.Key = s.TruncateKey(entry.Key) - - return &StorageEntry{ - Key: entry.Key, - Value: entry.Value, - SealWrap: entry.SealWrap, - }, nil -} - -// logical.Storage impl. -func (s *StorageView) Put(ctx context.Context, entry *StorageEntry) error { - if entry == nil { - return errors.New("cannot write nil entry") - } - - if err := s.SanityCheck(entry.Key); err != nil { - return err - } - - expandedKey := s.ExpandKey(entry.Key) - - nested := &StorageEntry{ - Key: expandedKey, - Value: entry.Value, - SealWrap: entry.SealWrap, - } - - return s.storage.Put(ctx, nested) -} - -// logical.Storage impl. -func (s *StorageView) Delete(ctx context.Context, key string) error { - if err := s.SanityCheck(key); err != nil { - return err - } - - expandedKey := s.ExpandKey(key) - - return s.storage.Delete(ctx, expandedKey) -} - -func (s *StorageView) Prefix() string { - return s.prefix -} - -// SubView constructs a nested sub-view using the given prefix -func (s *StorageView) SubView(prefix string) *StorageView { - sub := s.ExpandKey(prefix) - return &StorageView{storage: s.storage, prefix: sub} -} - -// SanityCheck is used to perform a sanity check on a key -func (s *StorageView) SanityCheck(key string) error { - if strings.Contains(key, "..") { - return ErrRelativePath - } - return nil -} - -// ExpandKey is used to expand to the full key path with the prefix -func (s *StorageView) ExpandKey(suffix string) string { - return s.prefix + suffix -} - -// TruncateKey is used to remove the prefix of the key -func (s *StorageView) TruncateKey(full string) string { - return strings.TrimPrefix(full, s.prefix) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go deleted file mode 100644 index 4e5627b1..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go +++ /dev/null @@ -1,235 +0,0 @@ -package logical - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/license" - "github.com/hashicorp/vault/sdk/helper/pluginutil" - "github.com/hashicorp/vault/sdk/helper/wrapping" -) - -// SystemView exposes system configuration information in a safe way -// for logical backends to consume -type SystemView interface { - // DefaultLeaseTTL returns the default lease TTL set in Vault configuration - DefaultLeaseTTL() time.Duration - - // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend - // authors should take care not to issue credentials that last longer than - // this value, as Vault will revoke them - MaxLeaseTTL() time.Duration - - // Returns true if the mount is tainted. A mount is tainted if it is in the - // process of being unmounted. This should only be used in special - // circumstances; a primary use-case is as a guard in revocation functions. - // If revocation of a backend's leases fails it can keep the unmounting - // process from being successful. If the reason for this failure is not - // relevant when the mount is tainted (for instance, saving a CRL to disk - // when the stored CRL will be removed during the unmounting process - // anyways), we can ignore the errors to allow unmounting to complete. 
- Tainted() bool - - // Returns true if caching is disabled. If true, no caches should be used, - // despite known slowdowns. - CachingDisabled() bool - - // When run from a system view attached to a request, indicates whether the - // request is affecting a local mount or not - LocalMount() bool - - // ReplicationState indicates the state of cluster replication - ReplicationState() consts.ReplicationState - - // HasFeature returns true if the feature is currently enabled - HasFeature(feature license.Features) bool - - // ResponseWrapData wraps the given data in a cubbyhole and returns the - // token used to unwrap. - ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) - - // LookupPlugin looks into the plugin catalog for a plugin with the given - // name. Returns a PluginRunner or an error if a plugin can not be found. - LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) - - // LookupPluginVersion looks into the plugin catalog for a plugin with the given - // name and version. Returns a PluginRunner or an error if a plugin can not be found. - LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error) - - // ListVersionedPlugins returns information about all plugins of a certain - // type in the catalog, including any versioning information stored for them. - ListVersionedPlugins(ctx context.Context, pluginType consts.PluginType) ([]pluginutil.VersionedPlugin, error) - - // NewPluginClient returns a client for managing the lifecycle of plugin - // processes - NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) - - // MlockEnabled returns the configuration setting for enabling mlock on - // plugins. - MlockEnabled() bool - - // EntityInfo returns a subset of information related to the identity entity - // for the given entity id - EntityInfo(entityID string) (*Entity, error) - - // GroupsForEntity returns the group membership information for the provided - // entity id - GroupsForEntity(entityID string) ([]*Group, error) - - // PluginEnv returns Vault environment information used by plugins - PluginEnv(context.Context) (*PluginEnvironment, error) - - // GeneratePasswordFromPolicy generates a password from the policy referenced. - // If the policy does not exist, this will return an error. 
- GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) -} - -type PasswordPolicy interface { - // Generate a random password - Generate(context.Context, io.Reader) (string, error) -} - -type ExtendedSystemView interface { - Auditor() Auditor - ForwardGenericRequest(context.Context, *Request) (*Response, error) -} - -type PasswordGenerator func() (password string, err error) - -type StaticSystemView struct { - DefaultLeaseTTLVal time.Duration - MaxLeaseTTLVal time.Duration - SudoPrivilegeVal bool - TaintedVal bool - CachingDisabledVal bool - Primary bool - EnableMlock bool - LocalMountVal bool - ReplicationStateVal consts.ReplicationState - EntityVal *Entity - GroupsVal []*Group - Features license.Features - VaultVersion string - PluginEnvironment *PluginEnvironment - PasswordPolicies map[string]PasswordGenerator -} - -type noopAuditor struct{} - -func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error { - return nil -} - -func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error { - return nil -} - -func (d StaticSystemView) Auditor() Auditor { - return noopAuditor{} -} - -func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) { - return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView") -} - -func (d StaticSystemView) DefaultLeaseTTL() time.Duration { - return d.DefaultLeaseTTLVal -} - -func (d StaticSystemView) MaxLeaseTTL() time.Duration { - return d.MaxLeaseTTLVal -} - -func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool { - return d.SudoPrivilegeVal -} - -func (d StaticSystemView) Tainted() bool { - return d.TaintedVal -} - -func (d StaticSystemView) CachingDisabled() bool { - return d.CachingDisabledVal -} - -func (d StaticSystemView) LocalMount() bool { - return d.LocalMountVal -} - -func (d StaticSystemView) ReplicationState() consts.ReplicationState { - return d.ReplicationStateVal -} - -func (d StaticSystemView) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) { - return nil, errors.New("NewPluginClient is not implemented in StaticSystemView") -} - -func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { - return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView") -} - -func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { - return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") -} - -func (d StaticSystemView) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) { - return nil, errors.New("LookupPluginVersion is not implemented in StaticSystemView") -} - -func (d StaticSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { - return nil, errors.New("ListVersionedPlugins is not implemented in StaticSystemView") -} - -func (d StaticSystemView) MlockEnabled() bool { - return d.EnableMlock -} - -func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) { - return d.EntityVal, nil -} - -func (d StaticSystemView) GroupsForEntity(entityID string) ([]*Group, error) { - return d.GroupsVal, nil -} - -func (d StaticSystemView) HasFeature(feature license.Features) bool { - return 
d.Features.HasFeature(feature) -} - -func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) { - return d.PluginEnvironment, nil -} - -func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { - select { - case <-ctx.Done(): - return "", fmt.Errorf("context timed out") - default: - } - - if d.PasswordPolicies == nil { - return "", fmt.Errorf("password policy not found") - } - policy, exists := d.PasswordPolicies[policyName] - if !exists { - return "", fmt.Errorf("password policy not found") - } - return policy() -} - -func (d *StaticSystemView) SetPasswordPolicy(name string, generator PasswordGenerator) { - if d.PasswordPolicies == nil { - d.PasswordPolicies = map[string]PasswordGenerator{} - } - d.PasswordPolicies[name] = generator -} - -func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { - _, existed = d.PasswordPolicies[name] - delete(d.PasswordPolicies, name) - return existed -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/testing.go b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go deleted file mode 100644 index 765f0982..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/testing.go +++ /dev/null @@ -1,87 +0,0 @@ -package logical - -import ( - "context" - "reflect" - "time" - - testing "github.com/mitchellh/go-testing-interface" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/logging" -) - -// TestRequest is a helper to create a purely in-memory Request struct. -func TestRequest(t testing.T, op Operation, path string) *Request { - return &Request{ - Operation: op, - Path: path, - Data: make(map[string]interface{}), - Storage: new(InmemStorage), - Connection: &Connection{}, - } -} - -// TestStorage is a helper that can be used from unit tests to verify -// the behavior of a Storage impl. -func TestStorage(t testing.T, s Storage) { - keys, err := s.List(context.Background(), "") - if err != nil { - t.Fatalf("list error: %s", err) - } - if len(keys) > 0 { - t.Fatalf("should have no keys to start: %#v", keys) - } - - entry := &StorageEntry{Key: "foo", Value: []byte("bar")} - if err := s.Put(context.Background(), entry); err != nil { - t.Fatalf("put error: %s", err) - } - - actual, err := s.Get(context.Background(), "foo") - if err != nil { - t.Fatalf("get error: %s", err) - } - if !reflect.DeepEqual(actual, entry) { - t.Fatalf("wrong value. 
Expected: %#v\nGot: %#v", entry, actual) - } - - keys, err = s.List(context.Background(), "") - if err != nil { - t.Fatalf("list error: %s", err) - } - if !reflect.DeepEqual(keys, []string{"foo"}) { - t.Fatalf("bad keys: %#v", keys) - } - - if err := s.Delete(context.Background(), "foo"); err != nil { - t.Fatalf("put error: %s", err) - } - - keys, err = s.List(context.Background(), "") - if err != nil { - t.Fatalf("list error: %s", err) - } - if len(keys) > 0 { - t.Fatalf("should have no keys to start: %#v", keys) - } -} - -func TestSystemView() *StaticSystemView { - defaultLeaseTTLVal := time.Hour * 24 - maxLeaseTTLVal := time.Hour * 24 * 2 - return &StaticSystemView{ - DefaultLeaseTTLVal: defaultLeaseTTLVal, - MaxLeaseTTLVal: maxLeaseTTLVal, - } -} - -func TestBackendConfig() *BackendConfig { - bc := &BackendConfig{ - Logger: logging.NewVaultLogger(log.Trace), - System: TestSystemView(), - Config: make(map[string]string), - } - - return bc -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/token.go b/vendor/github.com/hashicorp/vault/sdk/logical/token.go deleted file mode 100644 index ebebd4ad..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/token.go +++ /dev/null @@ -1,304 +0,0 @@ -package logical - -import ( - "crypto/sha256" - "encoding/base64" - "fmt" - "sort" - "strings" - "time" - - sockaddr "github.com/hashicorp/go-sockaddr" -) - -type TokenType uint8 - -const ( - // TokenTypeDefault means "use the default, if any, that is currently set - // on the mount". If not set, results in a Service token. - TokenTypeDefault TokenType = iota - - // TokenTypeService is a "normal" Vault token for long-lived services - TokenTypeService - - // TokenTypeBatch is a batch token - TokenTypeBatch - - // TokenTypeDefaultService configured on a mount, means that if - // TokenTypeDefault is sent back by the mount, create Service tokens - TokenTypeDefaultService - - // TokenTypeDefaultBatch configured on a mount, means that if - // TokenTypeDefault is sent back by the mount, create Batch tokens - TokenTypeDefaultBatch - - // ClientIDTWEDelimiter Delimiter between the string fields used to generate a client - // ID for tokens without entities. This is the 0 character, which - // is a non-printable string. Please see unicode.IsPrint for details. - ClientIDTWEDelimiter = rune('\x00') - - // SortedPoliciesTWEDelimiter Delimiter between each policy in the sorted policies used to - // generate a client ID for tokens without entities. This is the 127 - // character, which is a non-printable string. Please see unicode.IsPrint - // for details. 
- SortedPoliciesTWEDelimiter = rune('\x7F') -) - -func (t *TokenType) UnmarshalJSON(b []byte) error { - if len(b) == 1 { - *t = TokenType(b[0] - '0') - return nil - } - - // Handle upgrade from pre-1.2 where we were serialized as string: - s := string(b) - switch s { - case `"default"`, `""`: - *t = TokenTypeDefault - case `"service"`: - *t = TokenTypeService - case `"batch"`: - *t = TokenTypeBatch - case `"default-service"`: - *t = TokenTypeDefaultService - case `"default-batch"`: - *t = TokenTypeDefaultBatch - default: - return fmt.Errorf("unknown token type %q", s) - } - return nil -} - -func (t TokenType) String() string { - switch t { - case TokenTypeDefault: - return "default" - case TokenTypeService: - return "service" - case TokenTypeBatch: - return "batch" - case TokenTypeDefaultService: - return "default-service" - case TokenTypeDefaultBatch: - return "default-batch" - default: - panic("unreachable") - } -} - -// TokenEntry is used to represent a given token -type TokenEntry struct { - Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""` - - // ID of this entry, generally a random UUID - ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""` - - // ExternalID is the ID of a newly created service - // token that will be returned to a user - ExternalID string `json:"-"` - - // Accessor for this token, a random UUID - Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""` - - // Parent token, used for revocation trees - Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""` - - // Which named policies should be used - Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` - - // InlinePolicy specifies ACL rules to be applied to this token entry. - InlinePolicy string `json:"inline_policy" mapstructure:"inline_policy" structs:"inline_policy"` - - // Used for audit trails, this is something like "auth/user/login" - Path string `json:"path" mapstructure:"path" structs:"path"` - - // Used for auditing. This could include things like "source", "user", "ip" - Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"` - - // InternalMeta is used to store internal metadata. This metadata will not be audit logged or returned from lookup APIs. - InternalMeta map[string]string `json:"internal_meta" mapstructure:"internal_meta" structs:"internal_meta"` - - // Used for operators to be able to associate with the source - DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` - - // Used to restrict the number of uses (zero is unlimited). This is to - // support one-time-tokens (generalized). There are a few special values: - // if it's -1 it has run through its use counts and is executing its final - // use; if it's -2 it is tainted, which means revocation is currently - // running on it; and if it's -3 it's also tainted but revocation - // previously ran and failed, so this hints the tidy function to try it - // again. 
- NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` - - // Time of token creation - CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""` - - // Duration set when token was created - TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""` - - // Explicit maximum TTL on the token - ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""` - - // If set, the role that was used for parameters at creation time - Role string `json:"role" mapstructure:"role" structs:"role"` - - // If set, the period of the token. This is only used when created directly - // through the create endpoint; periods managed by roles or other auth - // backends are subject to those renewal rules. - Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""` - - // These are the deprecated fields - DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""` - NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""` - CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""` - ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""` - - // EntityID is the ID of the entity associated with this token. - EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"` - - // If NoIdentityPolicies is true, the token will not inherit - // identity policies from the associated EntityID. - NoIdentityPolicies bool `json:"no_identity_policies" mapstructure:"no_identity_policies" structs:"no_identity_policies"` - - // The set of CIDRs that this token can be used with - BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs" sentinel:""` - - // NamespaceID is the identifier of the namespace to which this token is - // confined to. Do not return this value over the API when the token is - // being looked up. - NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""` - - // CubbyholeID is the identifier of the cubbyhole storage belonging to this - // token - CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""` -} - -// CreateClientID returns the client ID, and a boolean which is false if the clientID -// has an entity, and true otherwise -func (te *TokenEntry) CreateClientID() (string, bool) { - var clientIDInputBuilder strings.Builder - - // if entry has an associated entity ID, return it - if te.EntityID != "" { - return te.EntityID, false - } - - // The entry is associated with a TWE (token without entity). 
In this case - // we must create a client ID by calculating the following formula: - // clientID = SHA256(sorted policies + namespace) - - // Step 1: Copy entry policies to a new struct - sortedPolicies := make([]string, len(te.Policies)) - copy(sortedPolicies, te.Policies) - - // Step 2: Sort and join copied policies - sort.Strings(sortedPolicies) - for _, pol := range sortedPolicies { - clientIDInputBuilder.WriteRune(SortedPoliciesTWEDelimiter) - clientIDInputBuilder.WriteString(pol) - } - - // Step 3: Add namespace ID - clientIDInputBuilder.WriteRune(ClientIDTWEDelimiter) - clientIDInputBuilder.WriteString(te.NamespaceID) - - if clientIDInputBuilder.Len() == 0 { - return "", true - } - // Step 4: Remove the first character in the string, as it's an unnecessary delimiter - clientIDInput := clientIDInputBuilder.String()[1:] - - // Step 5: Hash the sum - hashed := sha256.Sum256([]byte(clientIDInput)) - return base64.StdEncoding.EncodeToString(hashed[:]), true -} - -func (te *TokenEntry) SentinelGet(key string) (interface{}, error) { - if te == nil { - return nil, nil - } - switch key { - case "policies": - return te.Policies, nil - - case "path": - return te.Path, nil - - case "display_name": - return te.DisplayName, nil - - case "num_uses": - return te.NumUses, nil - - case "role": - return te.Role, nil - - case "entity_id": - return te.EntityID, nil - - case "period": - return te.Period, nil - - case "period_seconds": - return int64(te.Period.Seconds()), nil - - case "explicit_max_ttl": - return te.ExplicitMaxTTL, nil - - case "explicit_max_ttl_seconds": - return int64(te.ExplicitMaxTTL.Seconds()), nil - - case "creation_ttl": - return te.TTL, nil - - case "creation_ttl_seconds": - return int64(te.TTL.Seconds()), nil - - case "creation_time": - return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil - - case "creation_time_unix": - return time.Unix(te.CreationTime, 0), nil - - case "meta", "metadata": - return te.Meta, nil - - case "type": - teType := te.Type - switch teType { - case TokenTypeBatch, TokenTypeService: - case TokenTypeDefault: - teType = TokenTypeService - default: - return "unknown", nil - } - return teType.String(), nil - } - - return nil, nil -} - -func (te *TokenEntry) SentinelKeys() []string { - return []string{ - "period", - "period_seconds", - "explicit_max_ttl", - "explicit_max_ttl_seconds", - "creation_ttl", - "creation_ttl_seconds", - "creation_time", - "creation_time_unix", - "meta", - "metadata", - "type", - } -} - -// IsRoot returns false if the token is not root (or doesn't exist) -func (te *TokenEntry) IsRoot() bool { - if te == nil { - return false - } - - return len(te.Policies) == 1 && te.Policies[0] == "root" -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go deleted file mode 100644 index de5ea8fd..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go +++ /dev/null @@ -1,161 +0,0 @@ -package logical - -import ( - "bytes" - "encoding/json" - "fmt" - "time" -) - -// This logic was pulled from the http package so that it can be used for -// encoding wrapped responses as well. It simply translates the logical -// response to an http response, with the values we want and omitting the -// values we don't. 
-func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { - httpResp := &HTTPResponse{ - Data: input.Data, - Warnings: input.Warnings, - Headers: input.Headers, - } - - if input.Secret != nil { - httpResp.LeaseID = input.Secret.LeaseID - httpResp.Renewable = input.Secret.Renewable - httpResp.LeaseDuration = int(input.Secret.TTL.Seconds()) - } - - // If we have authentication information, then - // set up the result structure. - if input.Auth != nil { - httpResp.Auth = &HTTPAuth{ - ClientToken: input.Auth.ClientToken, - Accessor: input.Auth.Accessor, - Policies: input.Auth.Policies, - TokenPolicies: input.Auth.TokenPolicies, - IdentityPolicies: input.Auth.IdentityPolicies, - Metadata: input.Auth.Metadata, - LeaseDuration: int(input.Auth.TTL.Seconds()), - Renewable: input.Auth.Renewable, - EntityID: input.Auth.EntityID, - TokenType: input.Auth.TokenType.String(), - Orphan: input.Auth.Orphan, - MFARequirement: input.Auth.MFARequirement, - NumUses: input.Auth.NumUses, - } - } - - return httpResp -} - -func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { - logicalResp := &Response{ - Data: input.Data, - Warnings: input.Warnings, - Headers: input.Headers, - } - - if input.LeaseID != "" { - logicalResp.Secret = &Secret{ - LeaseID: input.LeaseID, - } - logicalResp.Secret.Renewable = input.Renewable - logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration) - } - - if input.Auth != nil { - logicalResp.Auth = &Auth{ - ClientToken: input.Auth.ClientToken, - Accessor: input.Auth.Accessor, - Policies: input.Auth.Policies, - TokenPolicies: input.Auth.TokenPolicies, - IdentityPolicies: input.Auth.IdentityPolicies, - Metadata: input.Auth.Metadata, - EntityID: input.Auth.EntityID, - Orphan: input.Auth.Orphan, - } - logicalResp.Auth.Renewable = input.Auth.Renewable - logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration) - switch input.Auth.TokenType { - case "service": - logicalResp.Auth.TokenType = TokenTypeService - case "batch": - logicalResp.Auth.TokenType = TokenTypeBatch - } - } - - return logicalResp -} - -type HTTPResponse struct { - RequestID string `json:"request_id"` - LeaseID string `json:"lease_id"` - Renewable bool `json:"renewable"` - LeaseDuration int `json:"lease_duration"` - Data map[string]interface{} `json:"data"` - WrapInfo *HTTPWrapInfo `json:"wrap_info"` - Warnings []string `json:"warnings"` - Headers map[string][]string `json:"-"` - Auth *HTTPAuth `json:"auth"` -} - -type HTTPAuth struct { - ClientToken string `json:"client_token"` - Accessor string `json:"accessor"` - Policies []string `json:"policies"` - TokenPolicies []string `json:"token_policies,omitempty"` - IdentityPolicies []string `json:"identity_policies,omitempty"` - Metadata map[string]string `json:"metadata"` - LeaseDuration int `json:"lease_duration"` - Renewable bool `json:"renewable"` - EntityID string `json:"entity_id"` - TokenType string `json:"token_type"` - Orphan bool `json:"orphan"` - MFARequirement *MFARequirement `json:"mfa_requirement"` - NumUses int `json:"num_uses"` -} - -type HTTPWrapInfo struct { - Token string `json:"token"` - Accessor string `json:"accessor"` - TTL int `json:"ttl"` - CreationTime string `json:"creation_time"` - CreationPath string `json:"creation_path"` - WrappedAccessor string `json:"wrapped_accessor,omitempty"` -} - -type HTTPSysInjector struct { - Response *HTTPResponse -} - -func (h HTTPSysInjector) MarshalJSON() ([]byte, error) { - j, err := json.Marshal(h.Response) - if err != nil { - return nil, err - } - // Fast 
path no data or empty data - if h.Response.Data == nil || len(h.Response.Data) == 0 { - return j, nil - } - // Marshaling a response will always be a JSON object, meaning it will - // always start with '{', so we hijack this to prepend necessary values - - var buf bytes.Buffer - buf.WriteRune('{') - for k, v := range h.Response.Data { - // Marshal each key/value individually - mk, err := json.Marshal(k) - if err != nil { - return nil, err - } - mv, err := json.Marshal(v) - if err != nil { - return nil, err - } - // Write into the final buffer. We'll never have a valid response - // without any fields so we can unconditionally add a comma after each. - buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv)) - } - // Add the rest, without the first '{' - buf.Write(j[1:]) - return buf.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go deleted file mode 100644 index 415970f1..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/version.pb.go +++ /dev/null @@ -1,204 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 -// source: sdk/logical/version.proto - -package logical - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_version_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_version_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. -func (*Empty) Descriptor() ([]byte, []int) { - return file_sdk_logical_version_proto_rawDescGZIP(), []int{0} -} - -// VersionReply is the reply for the Version method. 
-type VersionReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PluginVersion string `protobuf:"bytes,1,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` -} - -func (x *VersionReply) Reset() { - *x = VersionReply{} - if protoimpl.UnsafeEnabled { - mi := &file_sdk_logical_version_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VersionReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VersionReply) ProtoMessage() {} - -func (x *VersionReply) ProtoReflect() protoreflect.Message { - mi := &file_sdk_logical_version_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VersionReply.ProtoReflect.Descriptor instead. -func (*VersionReply) Descriptor() ([]byte, []int) { - return file_sdk_logical_version_proto_rawDescGZIP(), []int{1} -} - -func (x *VersionReply) GetPluginVersion() string { - if x != nil { - return x.PluginVersion - } - return "" -} - -var File_sdk_logical_version_proto protoreflect.FileDescriptor - -var file_sdk_logical_version_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, - 0x69, 0x63, 0x61, 0x6c, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x35, 0x0a, - 0x0c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, - 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x0a, 0x0d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x15, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, - 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_sdk_logical_version_proto_rawDescOnce sync.Once - file_sdk_logical_version_proto_rawDescData = file_sdk_logical_version_proto_rawDesc -) - -func file_sdk_logical_version_proto_rawDescGZIP() []byte { - file_sdk_logical_version_proto_rawDescOnce.Do(func() { - file_sdk_logical_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_version_proto_rawDescData) - }) - return file_sdk_logical_version_proto_rawDescData -} - -var file_sdk_logical_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_sdk_logical_version_proto_goTypes = []interface{}{ - (*Empty)(nil), // 0: logical.Empty - (*VersionReply)(nil), // 1: logical.VersionReply -} -var file_sdk_logical_version_proto_depIdxs = []int32{ - 0, // 0: logical.PluginVersion.Version:input_type -> logical.Empty - 1, // 1: 
logical.PluginVersion.Version:output_type -> logical.VersionReply - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_sdk_logical_version_proto_init() } -func file_sdk_logical_version_proto_init() { - if File_sdk_logical_version_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sdk_logical_version_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_sdk_logical_version_proto_goTypes, - DependencyIndexes: file_sdk_logical_version_proto_depIdxs, - MessageInfos: file_sdk_logical_version_proto_msgTypes, - }.Build() - File_sdk_logical_version_proto = out.File - file_sdk_logical_version_proto_rawDesc = nil - file_sdk_logical_version_proto_goTypes = nil - file_sdk_logical_version_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version.proto b/vendor/github.com/hashicorp/vault/sdk/logical/version.proto deleted file mode 100644 index 345051ae..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/version.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; -package logical; - -option go_package = "github.com/hashicorp/vault/sdk/logical"; - -message Empty {} - -// VersionReply is the reply for the Version method. -message VersionReply { - string plugin_version = 1; -} - -// PluginVersion is an optional RPC service implemented by plugins. -service PluginVersion { - // Version returns version information for the plugin. - rpc Version(Empty) returns (VersionReply); -} \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go deleted file mode 100644 index a69e9705..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/logical/version_grpc.pb.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package logical - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// PluginVersionClient is the client API for PluginVersion service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type PluginVersionClient interface { - // Version returns version information for the plugin. 
- Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) -} - -type pluginVersionClient struct { - cc grpc.ClientConnInterface -} - -func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient { - return &pluginVersionClient{cc} -} - -func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) { - out := new(VersionReply) - err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PluginVersionServer is the server API for PluginVersion service. -// All implementations must embed UnimplementedPluginVersionServer -// for forward compatibility -type PluginVersionServer interface { - // Version returns version information for the plugin. - Version(context.Context, *Empty) (*VersionReply, error) - mustEmbedUnimplementedPluginVersionServer() -} - -// UnimplementedPluginVersionServer must be embedded to have forward compatible implementations. -type UnimplementedPluginVersionServer struct { -} - -func (UnimplementedPluginVersionServer) Version(context.Context, *Empty) (*VersionReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") -} -func (UnimplementedPluginVersionServer) mustEmbedUnimplementedPluginVersionServer() {} - -// UnsafePluginVersionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PluginVersionServer will -// result in compilation errors. -type UnsafePluginVersionServer interface { - mustEmbedUnimplementedPluginVersionServer() -} - -func RegisterPluginVersionServer(s grpc.ServiceRegistrar, srv PluginVersionServer) { - s.RegisterService(&PluginVersion_ServiceDesc, srv) -} - -func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PluginVersionServer).Version(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/logical.PluginVersion/Version", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PluginVersionServer).Version(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// PluginVersion_ServiceDesc is the grpc.ServiceDesc for PluginVersion service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var PluginVersion_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "logical.PluginVersion", - HandlerType: (*PluginVersionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Version", - Handler: _PluginVersion_Version_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "sdk/logical/version.proto", -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go deleted file mode 100644 index af40f538..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go +++ /dev/null @@ -1,260 +0,0 @@ -package physical - -import ( - "context" - "sync/atomic" - - metrics "github.com/armon/go-metrics" - log "github.com/hashicorp/go-hclog" - lru "github.com/hashicorp/golang-lru" - "github.com/hashicorp/vault/sdk/helper/locksutil" - "github.com/hashicorp/vault/sdk/helper/pathmanager" -) - -const ( - // DefaultCacheSize is used if no cache size is specified for NewCache - DefaultCacheSize = 128 * 1024 - - // refreshCacheCtxKey is a ctx value that denotes the cache should be - // refreshed during a Get call. - refreshCacheCtxKey = "refresh_cache" -) - -// These paths don't need to be cached by the LRU cache. This should -// particularly help memory pressure when unsealing. -var cacheExceptionsPaths = []string{ - "wal/logs/", - "index/pages/", - "index-dr/pages/", - "sys/expire/", - "core/poison-pill", - "core/raft/tls", -} - -// CacheRefreshContext returns a context with an added value denoting if the -// cache should attempt a refresh. -func CacheRefreshContext(ctx context.Context, r bool) context.Context { - return context.WithValue(ctx, refreshCacheCtxKey, r) -} - -// cacheRefreshFromContext is a helper to look up if the provided context is -// requesting a cache refresh. -func cacheRefreshFromContext(ctx context.Context) bool { - r, ok := ctx.Value(refreshCacheCtxKey).(bool) - if !ok { - return false - } - return r -} - -// Cache is used to wrap an underlying physical backend -// and provide an LRU cache layer on top. Most of the reads done by -// Vault are for policy objects so there is a large read reduction -// by using a simple write-through cache. -type Cache struct { - backend Backend - lru *lru.TwoQueueCache - locks []*locksutil.LockEntry - logger log.Logger - enabled *uint32 - cacheExceptions *pathmanager.PathManager - metricSink metrics.MetricSink -} - -// TransactionalCache is a Cache that wraps the physical that is transactional -type TransactionalCache struct { - *Cache - Transactional -} - -// Verify Cache satisfies the correct interfaces -var ( - _ ToggleablePurgemonster = (*Cache)(nil) - _ ToggleablePurgemonster = (*TransactionalCache)(nil) - _ Backend = (*Cache)(nil) - _ Transactional = (*TransactionalCache)(nil) -) - -// NewCache returns a physical cache of the given size. -// If no size is provided, the default size is used. -func NewCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *Cache { - if logger.IsDebug() { - logger.Debug("creating LRU cache", "size", size) - } - if size <= 0 { - size = DefaultCacheSize - } - - pm := pathmanager.New() - pm.AddPaths(cacheExceptionsPaths) - - cache, _ := lru.New2Q(size) - c := &Cache{ - backend: b, - lru: cache, - locks: locksutil.CreateLocks(), - logger: logger, - // This fails safe. 
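For orientation on the vendored gRPC service being removed above, here is a minimal sketch of how a plugin could satisfy the generated PluginVersionServer interface. The versionServer type, the register helper, and the "v0.1.0" string are hypothetical, and the PluginVersion field name is assumed from the plugin_version proto field.

    package example

    import (
        "context"

        "github.com/hashicorp/vault/sdk/logical"
        "google.golang.org/grpc"
    )

    // versionServer is a hypothetical plugin-side implementation of the
    // PluginVersionServer interface generated from version.proto.
    type versionServer struct {
        logical.UnimplementedPluginVersionServer
    }

    // Version reports the plugin's own version string.
    func (versionServer) Version(context.Context, *logical.Empty) (*logical.VersionReply, error) {
        return &logical.VersionReply{PluginVersion: "v0.1.0"}, nil
    }

    // register wires the implementation into a gRPC server.
    func register(s *grpc.Server) {
        logical.RegisterPluginVersionServer(s, versionServer{})
    }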
- enabled: new(uint32), - cacheExceptions: pm, - metricSink: metricSink, - } - return c -} - -func NewTransactionalCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *TransactionalCache { - c := &TransactionalCache{ - Cache: NewCache(b, size, logger, metricSink), - Transactional: b.(Transactional), - } - return c -} - -func (c *Cache) ShouldCache(key string) bool { - if atomic.LoadUint32(c.enabled) == 0 { - return false - } - - return !c.cacheExceptions.HasPath(key) -} - -// SetEnabled is used to toggle whether the cache is on or off. It must be -// called with true to actually activate the cache after creation. -func (c *Cache) SetEnabled(enabled bool) { - if enabled { - atomic.StoreUint32(c.enabled, 1) - return - } - atomic.StoreUint32(c.enabled, 0) -} - -// Purge is used to clear the cache -func (c *Cache) Purge(ctx context.Context) { - // Lock the world - for _, lock := range c.locks { - lock.Lock() - defer lock.Unlock() - } - - c.lru.Purge() -} - -func (c *Cache) Put(ctx context.Context, entry *Entry) error { - if entry != nil && !c.ShouldCache(entry.Key) { - return c.backend.Put(ctx, entry) - } - - lock := locksutil.LockForKey(c.locks, entry.Key) - lock.Lock() - defer lock.Unlock() - - err := c.backend.Put(ctx, entry) - if err == nil { - c.lru.Add(entry.Key, entry) - c.metricSink.IncrCounter([]string{"cache", "write"}, 1) - } - return err -} - -func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) { - if !c.ShouldCache(key) { - return c.backend.Get(ctx, key) - } - - lock := locksutil.LockForKey(c.locks, key) - lock.RLock() - defer lock.RUnlock() - - // Check the LRU first - if !cacheRefreshFromContext(ctx) { - if raw, ok := c.lru.Get(key); ok { - if raw == nil { - return nil, nil - } - c.metricSink.IncrCounter([]string{"cache", "hit"}, 1) - return raw.(*Entry), nil - } - } - - c.metricSink.IncrCounter([]string{"cache", "miss"}, 1) - // Read from the underlying backend - ent, err := c.backend.Get(ctx, key) - if err != nil { - return nil, err - } - - // Cache the result, even if nil - c.lru.Add(key, ent) - - return ent, nil -} - -func (c *Cache) Delete(ctx context.Context, key string) error { - if !c.ShouldCache(key) { - return c.backend.Delete(ctx, key) - } - - lock := locksutil.LockForKey(c.locks, key) - lock.Lock() - defer lock.Unlock() - - err := c.backend.Delete(ctx, key) - if err == nil { - c.lru.Remove(key) - } - return err -} - -func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) { - // Always pass-through as this would be difficult to cache. For the same - // reason we don't lock as we can't reasonably know which locks to readlock - // ahead of time. 
- return c.backend.List(ctx, prefix) -} - -func (c *TransactionalCache) Locks() []*locksutil.LockEntry { - return c.locks -} - -func (c *TransactionalCache) LRU() *lru.TwoQueueCache { - return c.lru -} - -func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error { - // Bypass the locking below - if atomic.LoadUint32(c.enabled) == 0 { - return c.Transactional.Transaction(ctx, txns) - } - - // Collect keys that need to be locked - var keys []string - for _, curr := range txns { - keys = append(keys, curr.Entry.Key) - } - // Lock the keys - for _, l := range locksutil.LocksForKeys(c.locks, keys) { - l.Lock() - defer l.Unlock() - } - - if err := c.Transactional.Transaction(ctx, txns); err != nil { - return err - } - - for _, txn := range txns { - if !c.ShouldCache(txn.Entry.Key) { - continue - } - - switch txn.Operation { - case PutOperation: - c.lru.Add(txn.Entry.Key, txn.Entry) - c.metricSink.IncrCounter([]string{"cache", "write"}, 1) - case DeleteOperation: - c.lru.Remove(txn.Entry.Key) - c.metricSink.IncrCounter([]string{"cache", "delete"}, 1) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go deleted file mode 100644 index dbde84cc..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go +++ /dev/null @@ -1,108 +0,0 @@ -package physical - -import ( - "context" - "errors" - "strings" - "unicode" - "unicode/utf8" -) - -var ( - ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters") - ErrNonPrintable = errors.New("key contains non-printable characters") -) - -// StorageEncoding is used to add errors into underlying physical requests -type StorageEncoding struct { - Backend -} - -// TransactionalStorageEncoding is the transactional version of the error -// injector -type TransactionalStorageEncoding struct { - *StorageEncoding - Transactional -} - -// Verify StorageEncoding satisfies the correct interfaces -var ( - _ Backend = (*StorageEncoding)(nil) - _ Transactional = (*TransactionalStorageEncoding)(nil) -) - -// NewStorageEncoding returns a wrapped physical backend and verifies the key -// encoding -func NewStorageEncoding(b Backend) Backend { - enc := &StorageEncoding{ - Backend: b, - } - - if bTxn, ok := b.(Transactional); ok { - return &TransactionalStorageEncoding{ - StorageEncoding: enc, - Transactional: bTxn, - } - } - - return enc -} - -func (e *StorageEncoding) containsNonPrintableChars(key string) bool { - idx := strings.IndexFunc(key, func(c rune) bool { - return !unicode.IsPrint(c) - }) - - return idx != -1 -} - -func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error { - if !utf8.ValidString(entry.Key) { - return ErrNonUTF8 - } - - if e.containsNonPrintableChars(entry.Key) { - return ErrNonPrintable - } - - return e.Backend.Put(ctx, entry) -} - -func (e *StorageEncoding) Delete(ctx context.Context, key string) error { - if !utf8.ValidString(key) { - return ErrNonUTF8 - } - - if e.containsNonPrintableChars(key) { - return ErrNonPrintable - } - - return e.Backend.Delete(ctx, key) -} - -func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error { - for _, txn := range txns { - if !utf8.ValidString(txn.Entry.Key) { - return ErrNonUTF8 - } - - if e.containsNonPrintableChars(txn.Entry.Key) { - return ErrNonPrintable - } - - } - - return e.Transactional.Transaction(ctx, txns) -} - -func (e *StorageEncoding) Purge(ctx context.Context) { - if purgeable, ok := 
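Since the physical.Cache implementation above is part of the dropped vendor copy, here is a short sketch of how it was typically wired up, assuming the in-memory backend from the same package and go-metrics' BlackholeSink; note that SetEnabled(true) is needed because the cache is created disabled ("fails safe").

    package example

    import (
        "context"

        metrics "github.com/armon/go-metrics"
        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func newCachedBackend() (physical.Backend, error) {
        logger := log.NewNullLogger()

        // Plain in-memory backend as the underlying store.
        backend, err := inmem.NewInmem(nil, logger)
        if err != nil {
            return nil, err
        }

        // Wrap it with the write-through LRU cache; size 0 falls back to DefaultCacheSize.
        cache := physical.NewCache(backend, 0, logger, &metrics.BlackholeSink{})

        // The cache starts disabled and must be switched on explicitly.
        cache.SetEnabled(true)

        // Reads for cacheable paths now hit the LRU before the backend.
        _ = cache.Put(context.Background(), &physical.Entry{Key: "core/test", Value: []byte("v")})
        return cache, nil
    }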
e.Backend.(ToggleablePurgemonster); ok { - purgeable.Purge(ctx) - } -} - -func (e *StorageEncoding) SetEnabled(enabled bool) { - if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok { - purgeable.SetEnabled(enabled) - } -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go deleted file mode 100644 index 389fe6c8..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go +++ /dev/null @@ -1,20 +0,0 @@ -package physical - -import ( - "encoding/hex" - "fmt" -) - -// Entry is used to represent data stored by the physical backend -type Entry struct { - Key string - Value []byte - SealWrap bool `json:"seal_wrap,omitempty"` - - // Only used in replication - ValueHash []byte -} - -func (e *Entry) String() string { - return fmt.Sprintf("Key: %s. SealWrap: %t. Value: %s. ValueHash: %s", e.Key, e.SealWrap, hex.EncodeToString(e.Value), hex.EncodeToString(e.ValueHash)) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/error.go b/vendor/github.com/hashicorp/vault/sdk/physical/error.go deleted file mode 100644 index b547e4e4..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/error.go +++ /dev/null @@ -1,110 +0,0 @@ -package physical - -import ( - "context" - "errors" - "math/rand" - "sync" - "time" - - log "github.com/hashicorp/go-hclog" -) - -const ( - // DefaultErrorPercent is used to determin how often we error - DefaultErrorPercent = 20 -) - -// ErrorInjector is used to add errors into underlying physical requests -type ErrorInjector struct { - backend Backend - errorPercent int - randomLock *sync.Mutex - random *rand.Rand -} - -// TransactionalErrorInjector is the transactional version of the error -// injector -type TransactionalErrorInjector struct { - *ErrorInjector - Transactional -} - -// Verify ErrorInjector satisfies the correct interfaces -var ( - _ Backend = (*ErrorInjector)(nil) - _ Transactional = (*TransactionalErrorInjector)(nil) -) - -// NewErrorInjector returns a wrapped physical backend to inject error -func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector { - if errorPercent < 0 || errorPercent > 100 { - errorPercent = DefaultErrorPercent - } - logger.Info("creating error injector") - - return &ErrorInjector{ - backend: b, - errorPercent: errorPercent, - randomLock: new(sync.Mutex), - random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - } -} - -// NewTransactionalErrorInjector creates a new transactional ErrorInjector -func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector { - return &TransactionalErrorInjector{ - ErrorInjector: NewErrorInjector(b, errorPercent, logger), - Transactional: b.(Transactional), - } -} - -func (e *ErrorInjector) SetErrorPercentage(p int) { - e.errorPercent = p -} - -func (e *ErrorInjector) addError() error { - e.randomLock.Lock() - roll := e.random.Intn(100) - e.randomLock.Unlock() - if roll < e.errorPercent { - return errors.New("random error") - } - - return nil -} - -func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error { - if err := e.addError(); err != nil { - return err - } - return e.backend.Put(ctx, entry) -} - -func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) { - if err := e.addError(); err != nil { - return nil, err - } - return e.backend.Get(ctx, key) -} - -func (e *ErrorInjector) Delete(ctx context.Context, key string) error { - if err := e.addError(); err != nil { - return err - } - 
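The StorageEncoding wrapper above only validates keys before delegating to the underlying backend; a minimal sketch of that behaviour, assuming the in-memory backend (the key strings are illustrative):

    package example

    import (
        "context"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func encodingExample() error {
        backend, err := inmem.NewInmem(nil, log.NewNullLogger())
        if err != nil {
            return err
        }
        enc := physical.NewStorageEncoding(backend)

        // A key containing a non-printable byte is rejected with ErrNonPrintable.
        err = enc.Put(context.Background(), &physical.Entry{Key: "bad\x00key", Value: []byte("v")})
        if err != physical.ErrNonPrintable {
            return err
        }

        // Ordinary printable UTF-8 keys pass straight through to the backend.
        return enc.Put(context.Background(), &physical.Entry{Key: "ok/key", Value: []byte("v")})
    }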
return e.backend.Delete(ctx, key) -} - -func (e *ErrorInjector) List(ctx context.Context, prefix string) ([]string, error) { - if err := e.addError(); err != nil { - return nil, err - } - return e.backend.List(ctx, prefix) -} - -func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error { - if err := e.addError(); err != nil { - return err - } - return e.Transactional.Transaction(ctx, txns) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go deleted file mode 100644 index be16b4ca..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go +++ /dev/null @@ -1,310 +0,0 @@ -package inmem - -import ( - "context" - "errors" - "fmt" - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - - "github.com/armon/go-radix" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" -) - -// Verify interfaces are satisfied -var ( - _ physical.Backend = (*InmemBackend)(nil) - _ physical.HABackend = (*InmemHABackend)(nil) - _ physical.HABackend = (*TransactionalInmemHABackend)(nil) - _ physical.Lock = (*InmemLock)(nil) - _ physical.Transactional = (*TransactionalInmemBackend)(nil) - _ physical.Transactional = (*TransactionalInmemHABackend)(nil) -) - -var ( - PutDisabledError = errors.New("put operations disabled in inmem backend") - GetDisabledError = errors.New("get operations disabled in inmem backend") - DeleteDisabledError = errors.New("delete operations disabled in inmem backend") - ListDisabledError = errors.New("list operations disabled in inmem backend") - GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in inmem backend") -) - -// InmemBackend is an in-memory only physical backend. It is useful -// for testing and development situations where the data is not -// expected to be durable. 
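A small sketch of exercising the ErrorInjector above; the 50 percent error rate and the loop are arbitrary choices for illustration:

    package example

    import (
        "context"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func countInjectedErrors() (int, error) {
        backend, err := inmem.NewInmem(nil, log.NewNullLogger())
        if err != nil {
            return 0, err
        }

        // Roughly half of all operations will fail with "random error".
        injector := physical.NewErrorInjector(backend, 50, log.NewNullLogger())

        failures := 0
        for i := 0; i < 100; i++ {
            if _, err := injector.Get(context.Background(), "any-key"); err != nil {
                failures++
            }
        }
        return failures, nil
    }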
-type InmemBackend struct { - sync.RWMutex - root *radix.Tree - permitPool *physical.PermitPool - logger log.Logger - failGet *uint32 - failPut *uint32 - failDelete *uint32 - failList *uint32 - failGetInTxn *uint32 - logOps bool - maxValueSize int -} - -type TransactionalInmemBackend struct { - InmemBackend -} - -// NewInmem constructs a new in-memory backend -func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { - maxValueSize := 0 - maxValueSizeStr, ok := conf["max_value_size"] - if ok { - var err error - maxValueSize, err = strconv.Atoi(maxValueSizeStr) - if err != nil { - return nil, err - } - } - - return &InmemBackend{ - root: radix.New(), - permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), - logger: logger, - failGet: new(uint32), - failPut: new(uint32), - failDelete: new(uint32), - failList: new(uint32), - failGetInTxn: new(uint32), - logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", - maxValueSize: maxValueSize, - }, nil -} - -// Basically for now just creates a permit pool of size 1 so only one operation -// can run at a time -func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { - maxValueSize := 0 - maxValueSizeStr, ok := conf["max_value_size"] - if ok { - var err error - maxValueSize, err = strconv.Atoi(maxValueSizeStr) - if err != nil { - return nil, err - } - } - - return &TransactionalInmemBackend{ - InmemBackend: InmemBackend{ - root: radix.New(), - permitPool: physical.NewPermitPool(1), - logger: logger, - failGet: new(uint32), - failPut: new(uint32), - failDelete: new(uint32), - failList: new(uint32), - failGetInTxn: new(uint32), - logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", - maxValueSize: maxValueSize, - }, - }, nil -} - -// Put is used to insert or update an entry -func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error { - i.permitPool.Acquire() - defer i.permitPool.Release() - - i.Lock() - defer i.Unlock() - - return i.PutInternal(ctx, entry) -} - -func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { - if i.logOps { - i.logger.Trace("put", "key", entry.Key) - } - if atomic.LoadUint32(i.failPut) != 0 { - return PutDisabledError - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize { - return fmt.Errorf("%s", physical.ErrValueTooLarge) - } - - i.root.Insert(entry.Key, entry.Value) - return nil -} - -func (i *InmemBackend) FailPut(fail bool) { - var val uint32 - if fail { - val = 1 - } - atomic.StoreUint32(i.failPut, val) -} - -// Get is used to fetch an entry -func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { - i.permitPool.Acquire() - defer i.permitPool.Release() - - i.RLock() - defer i.RUnlock() - - return i.GetInternal(ctx, key) -} - -func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { - if i.logOps { - i.logger.Trace("get", "key", key) - } - if atomic.LoadUint32(i.failGet) != 0 { - return nil, GetDisabledError - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - if raw, ok := i.root.Get(key); ok { - return &physical.Entry{ - Key: key, - Value: raw.([]byte), - }, nil - } - return nil, nil -} - -func (i *InmemBackend) FailGet(fail bool) { - var val uint32 - if fail { - val = 1 - } - atomic.StoreUint32(i.failGet, val) -} - -func (i *InmemBackend) FailGetInTxn(fail bool) { - var val uint32 - if fail { - val = 1 - } - 
atomic.StoreUint32(i.failGetInTxn, val) -} - -// Delete is used to permanently delete an entry -func (i *InmemBackend) Delete(ctx context.Context, key string) error { - i.permitPool.Acquire() - defer i.permitPool.Release() - - i.Lock() - defer i.Unlock() - - return i.DeleteInternal(ctx, key) -} - -func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error { - if i.logOps { - i.logger.Trace("delete", "key", key) - } - if atomic.LoadUint32(i.failDelete) != 0 { - return DeleteDisabledError - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - i.root.Delete(key) - return nil -} - -func (i *InmemBackend) FailDelete(fail bool) { - var val uint32 - if fail { - val = 1 - } - atomic.StoreUint32(i.failDelete, val) -} - -// List is used to list all the keys under a given -// prefix, up to the next prefix. -func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) { - i.permitPool.Acquire() - defer i.permitPool.Release() - - i.RLock() - defer i.RUnlock() - - return i.ListInternal(ctx, prefix) -} - -func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) { - if i.logOps { - i.logger.Trace("list", "prefix", prefix) - } - if atomic.LoadUint32(i.failList) != 0 { - return nil, ListDisabledError - } - - var out []string - seen := make(map[string]interface{}) - walkFn := func(s string, v interface{}) bool { - trimmed := strings.TrimPrefix(s, prefix) - sep := strings.Index(trimmed, "/") - if sep == -1 { - out = append(out, trimmed) - } else { - trimmed = trimmed[:sep+1] - if _, ok := seen[trimmed]; !ok { - out = append(out, trimmed) - seen[trimmed] = struct{}{} - } - } - return false - } - i.root.WalkPrefix(prefix, walkFn) - - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - return out, nil -} - -func (i *InmemBackend) FailList(fail bool) { - var val uint32 - if fail { - val = 1 - } - atomic.StoreUint32(i.failList, val) -} - -// Transaction implements the transaction interface -func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { - t.permitPool.Acquire() - defer t.permitPool.Release() - - t.Lock() - defer t.Unlock() - - failGetInTxn := atomic.LoadUint32(t.failGetInTxn) - for _, t := range txns { - if t.Operation == physical.GetOperation && failGetInTxn != 0 { - return GetInTxnDisabledError - } - } - - return physical.GenericTransactionHandler(ctx, t, txns) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go deleted file mode 100644 index 64fcb3a6..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go +++ /dev/null @@ -1,167 +0,0 @@ -package inmem - -import ( - "fmt" - "sync" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/physical" -) - -type InmemHABackend struct { - physical.Backend - locks map[string]string - l *sync.Mutex - cond *sync.Cond - logger log.Logger -} - -type TransactionalInmemHABackend struct { - physical.Transactional - InmemHABackend -} - -// NewInmemHA constructs a new in-memory HA backend. This is only for testing. 
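The in-memory backend above takes a single max_value_size option from its config map; a minimal usage sketch (the size and keys are illustrative):

    package example

    import (
        "context"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func inmemExample() (*physical.Entry, error) {
        // Cap stored values at 1 KiB; oversized puts fail with ErrValueTooLarge.
        backend, err := inmem.NewInmem(map[string]string{"max_value_size": "1024"}, log.NewNullLogger())
        if err != nil {
            return nil, err
        }

        if err := backend.Put(context.Background(), &physical.Entry{Key: "foo/bar", Value: []byte("baz")}); err != nil {
            return nil, err
        }

        // Get returns nil, nil for missing keys and the stored entry otherwise.
        return backend.Get(context.Background(), "foo/bar")
    }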
-func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { - be, err := NewInmem(nil, logger) - if err != nil { - return nil, err - } - - in := &InmemHABackend{ - Backend: be, - locks: make(map[string]string), - logger: logger, - l: new(sync.Mutex), - } - in.cond = sync.NewCond(in.l) - return in, nil -} - -func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { - transInmem, err := NewTransactionalInmem(nil, logger) - if err != nil { - return nil, err - } - inmemHA := InmemHABackend{ - Backend: transInmem, - locks: make(map[string]string), - logger: logger, - l: new(sync.Mutex), - } - - in := &TransactionalInmemHABackend{ - InmemHABackend: inmemHA, - Transactional: transInmem.(physical.Transactional), - } - in.cond = sync.NewCond(in.l) - return in, nil -} - -// LockWith is used for mutual exclusion based on the given key. -func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) { - l := &InmemLock{ - in: i, - key: key, - value: value, - } - return l, nil -} - -// LockMapSize is used in some tests to determine whether this backend has ever -// been used for HA purposes rather than simply for storage -func (i *InmemHABackend) LockMapSize() int { - return len(i.locks) -} - -// HAEnabled indicates whether the HA functionality should be exposed. -// Currently always returns true. -func (i *InmemHABackend) HAEnabled() bool { - return true -} - -// InmemLock is an in-memory Lock implementation for the HABackend -type InmemLock struct { - in *InmemHABackend - key string - value string - - held bool - leaderCh chan struct{} - l sync.Mutex -} - -func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - i.l.Lock() - defer i.l.Unlock() - if i.held { - return nil, fmt.Errorf("lock already held") - } - - // Attempt an async acquisition - didLock := make(chan struct{}) - releaseCh := make(chan bool, 1) - go func() { - // Wait to acquire the lock - i.in.l.Lock() - _, ok := i.in.locks[i.key] - for ok { - i.in.cond.Wait() - _, ok = i.in.locks[i.key] - } - i.in.locks[i.key] = i.value - i.in.l.Unlock() - - // Signal that lock is held - close(didLock) - - // Handle an early abort - release := <-releaseCh - if release { - i.in.l.Lock() - delete(i.in.locks, i.key) - i.in.l.Unlock() - i.in.cond.Broadcast() - } - }() - - // Wait for lock acquisition or shutdown - select { - case <-didLock: - releaseCh <- false - case <-stopCh: - releaseCh <- true - return nil, nil - } - - // Create the leader channel - i.held = true - i.leaderCh = make(chan struct{}) - return i.leaderCh, nil -} - -func (i *InmemLock) Unlock() error { - i.l.Lock() - defer i.l.Unlock() - - if !i.held { - return nil - } - - close(i.leaderCh) - i.leaderCh = nil - i.held = false - - i.in.l.Lock() - delete(i.in.locks, i.key) - i.in.l.Unlock() - i.in.cond.Broadcast() - return nil -} - -func (i *InmemLock) Value() (bool, string, error) { - i.in.l.Lock() - val, ok := i.in.locks[i.key] - i.in.l.Unlock() - return ok, val, nil -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go deleted file mode 100644 index 18b2c4c1..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go +++ /dev/null @@ -1,113 +0,0 @@ -package physical - -import ( - "context" - "math/rand" - "sync" - "time" - - log "github.com/hashicorp/go-hclog" - uberAtomic "go.uber.org/atomic" -) - -const ( - // DefaultJitterPercent is used if no cache size is specified for NewCache - 
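A sketch of the HA locking flow provided by the in-memory HA backend above; the lock key and value are arbitrary, and passing a nil stopCh simply means the acquisition is never cancelled:

    package example

    import (
        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func haLockExample() error {
        be, err := inmem.NewInmemHA(nil, log.NewNullLogger())
        if err != nil {
            return err
        }
        ha := be.(physical.HABackend)

        lock, err := ha.LockWith("core/lock", "node-1")
        if err != nil {
            return err
        }

        // Lock blocks until the lock is held (or stopCh closes); the returned
        // channel is closed if leadership is ever lost.
        leaderCh, err := lock.Lock(nil)
        if err != nil {
            return err
        }
        _ = leaderCh

        // Value reports whether the lock is held and by which value.
        held, val, _ := lock.Value()
        _, _ = held, val

        return lock.Unlock()
    }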
DefaultJitterPercent = 20 -) - -// LatencyInjector is used to add latency into underlying physical requests -type LatencyInjector struct { - logger log.Logger - backend Backend - latency *uberAtomic.Duration - jitterPercent int - randomLock *sync.Mutex - random *rand.Rand -} - -// TransactionalLatencyInjector is the transactional version of the latency -// injector -type TransactionalLatencyInjector struct { - *LatencyInjector - Transactional -} - -// Verify LatencyInjector satisfies the correct interfaces -var ( - _ Backend = (*LatencyInjector)(nil) - _ Transactional = (*TransactionalLatencyInjector)(nil) -) - -// NewLatencyInjector returns a wrapped physical backend to simulate latency -func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector { - if jitter < 0 || jitter > 100 { - jitter = DefaultJitterPercent - } - logger.Info("creating latency injector") - - return &LatencyInjector{ - logger: logger, - backend: b, - latency: uberAtomic.NewDuration(latency), - jitterPercent: jitter, - randomLock: new(sync.Mutex), - random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - } -} - -// NewTransactionalLatencyInjector creates a new transactional LatencyInjector -func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector { - return &TransactionalLatencyInjector{ - LatencyInjector: NewLatencyInjector(b, latency, jitter, logger), - Transactional: b.(Transactional), - } -} - -func (l *LatencyInjector) SetLatency(latency time.Duration) { - l.logger.Info("Changing backend latency", "latency", latency) - l.latency.Store(latency) -} - -func (l *LatencyInjector) addLatency() { - // Calculate a value between 1 +- jitter% - percent := 100 - if l.jitterPercent > 0 { - min := 100 - l.jitterPercent - max := 100 + l.jitterPercent - l.randomLock.Lock() - percent = l.random.Intn(max-min) + min - l.randomLock.Unlock() - } - latencyDuration := time.Duration(int(l.latency.Load()) * percent / 100) - time.Sleep(latencyDuration) -} - -// Put is a latent put request -func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error { - l.addLatency() - return l.backend.Put(ctx, entry) -} - -// Get is a latent get request -func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) { - l.addLatency() - return l.backend.Get(ctx, key) -} - -// Delete is a latent delete request -func (l *LatencyInjector) Delete(ctx context.Context, key string) error { - l.addLatency() - return l.backend.Delete(ctx, key) -} - -// List is a latent list request -func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) { - l.addLatency() - return l.backend.List(ctx, prefix) -} - -// Transaction is a latent transaction request -func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error { - l.addLatency() - return l.Transactional.Transaction(ctx, txns) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go deleted file mode 100644 index 808abd50..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/physical.go +++ /dev/null @@ -1,134 +0,0 @@ -package physical - -import ( - "context" - "strings" - - log "github.com/hashicorp/go-hclog" -) - -const DefaultParallelOperations = 128 - -// The operation type -type Operation string - -const ( - DeleteOperation Operation = "delete" - GetOperation = "get" - ListOperation = "list" - PutOperation = 
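The LatencyInjector above adds a configurable delay with jitter in front of any backend; a minimal sketch (the 5ms base latency and 20 percent jitter are illustrative):

    package example

    import (
        "context"
        "time"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func latencyExample() error {
        backend, err := inmem.NewInmem(nil, log.NewNullLogger())
        if err != nil {
            return err
        }

        // Every operation now sleeps ~5ms +/- 20% before reaching the real backend.
        slow := physical.NewLatencyInjector(backend, 5*time.Millisecond, 20, log.NewNullLogger())

        if err := slow.Put(context.Background(), &physical.Entry{Key: "k", Value: []byte("v")}); err != nil {
            return err
        }

        // The injected latency can be retuned at runtime.
        slow.SetLatency(50 * time.Millisecond)
        _, err = slow.Get(context.Background(), "k")
        return err
    }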
"put" -) - -const ( - ErrValueTooLarge = "put failed due to value being too large" - ErrKeyTooLarge = "put failed due to key being too large" -) - -// Backend is the interface required for a physical -// backend. A physical backend is used to durably store -// data outside of Vault. As such, it is completely untrusted, -// and is only accessed via a security barrier. The backends -// must represent keys in a hierarchical manner. All methods -// are expected to be thread safe. -type Backend interface { - // Put is used to insert or update an entry - Put(ctx context.Context, entry *Entry) error - - // Get is used to fetch an entry - Get(ctx context.Context, key string) (*Entry, error) - - // Delete is used to permanently delete an entry - Delete(ctx context.Context, key string) error - - // List is used to list all the keys under a given - // prefix, up to the next prefix. - List(ctx context.Context, prefix string) ([]string, error) -} - -// HABackend is an extensions to the standard physical -// backend to support high-availability. Vault only expects to -// use mutual exclusion to allow multiple instances to act as a -// hot standby for a leader that services all requests. -type HABackend interface { - // LockWith is used for mutual exclusion based on the given key. - LockWith(key, value string) (Lock, error) - - // Whether or not HA functionality is enabled - HAEnabled() bool -} - -// ToggleablePurgemonster is an interface for backends that can toggle on or -// off special functionality and/or support purging. This is only used for the -// cache, don't use it for other things. -type ToggleablePurgemonster interface { - Purge(ctx context.Context) - SetEnabled(bool) -} - -// RedirectDetect is an optional interface that an HABackend -// can implement. If they do, a redirect address can be automatically -// detected. -type RedirectDetect interface { - // DetectHostAddr is used to detect the host address - DetectHostAddr() (string, error) -} - -type Lock interface { - // Lock is used to acquire the given lock - // The stopCh is optional and if closed should interrupt the lock - // acquisition attempt. The return struct should be closed when - // leadership is lost. - Lock(stopCh <-chan struct{}) (<-chan struct{}, error) - - // Unlock is used to release the lock - Unlock() error - - // Returns the value of the lock and if it is held - Value() (bool, string, error) -} - -// Factory is the factory function to create a physical backend. -type Factory func(config map[string]string, logger log.Logger) (Backend, error) - -// PermitPool is used to limit maximum outstanding requests -type PermitPool struct { - sem chan int -} - -// NewPermitPool returns a new permit pool with the provided -// number of permits -func NewPermitPool(permits int) *PermitPool { - if permits < 1 { - permits = DefaultParallelOperations - } - return &PermitPool{ - sem: make(chan int, permits), - } -} - -// Acquire returns when a permit has been acquired -func (c *PermitPool) Acquire() { - c.sem <- 1 -} - -// Release returns a permit to the pool -func (c *PermitPool) Release() { - <-c.sem -} - -// Get number of requests in the permit pool -func (c *PermitPool) CurrentPermits() int { - return len(c.sem) -} - -// Prefixes is a shared helper function returns all parent 'folders' for a -// given vault key. -// e.g. 
for 'foo/bar/baz', it returns ['foo', 'foo/bar'] -func Prefixes(s string) []string { - components := strings.Split(s, "/") - result := []string{} - for i := 1; i < len(components); i++ { - result = append(result, strings.Join(components[:i], "/")) - } - return result -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go deleted file mode 100644 index 7497313a..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go +++ /dev/null @@ -1,40 +0,0 @@ -package physical - -import ( - "context" -) - -// PhysicalAccess is a wrapper around physical.Backend that allows Core to -// expose its physical storage operations through PhysicalAccess() while -// restricting the ability to modify Core.physical itself. -type PhysicalAccess struct { - physical Backend -} - -var _ Backend = (*PhysicalAccess)(nil) - -func NewPhysicalAccess(physical Backend) *PhysicalAccess { - return &PhysicalAccess{physical: physical} -} - -func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error { - return p.physical.Put(ctx, entry) -} - -func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) { - return p.physical.Get(ctx, key) -} - -func (p *PhysicalAccess) Delete(ctx context.Context, key string) error { - return p.physical.Delete(ctx, key) -} - -func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) { - return p.physical.List(ctx, prefix) -} - -func (p *PhysicalAccess) Purge(ctx context.Context) { - if purgeable, ok := p.physical.(ToggleablePurgemonster); ok { - purgeable.Purge(ctx) - } -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go deleted file mode 100644 index 189ac931..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go +++ /dev/null @@ -1,94 +0,0 @@ -package physical - -import ( - "context" - "errors" - "strings" -) - -var ErrRelativePath = errors.New("relative paths not supported") - -// View represents a prefixed view of a physical backend -type View struct { - backend Backend - prefix string -} - -// Verify View satisfies the correct interfaces -var _ Backend = (*View)(nil) - -// NewView takes an underlying physical backend and returns -// a view of it that can only operate with the given prefix. 
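Two small helpers from the physical package above, shown together in one sketch: PermitPool bounds the number of concurrent backend operations, and Prefixes enumerates the parent "folders" of a key.

    package example

    import (
        "github.com/hashicorp/vault/sdk/physical"
    )

    func helpersExample() []string {
        // Allow at most two backend operations in flight at once.
        pool := physical.NewPermitPool(2)

        pool.Acquire()
        // ... perform a storage operation ...
        pool.Release()

        // Prefixes("foo/bar/baz") returns ["foo", "foo/bar"].
        return physical.Prefixes("foo/bar/baz")
    }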
-func NewView(backend Backend, prefix string) *View { - return &View{ - backend: backend, - prefix: prefix, - } -} - -// List the contents of the prefixed view -func (v *View) List(ctx context.Context, prefix string) ([]string, error) { - if err := v.sanityCheck(prefix); err != nil { - return nil, err - } - return v.backend.List(ctx, v.expandKey(prefix)) -} - -// Get the key of the prefixed view -func (v *View) Get(ctx context.Context, key string) (*Entry, error) { - if err := v.sanityCheck(key); err != nil { - return nil, err - } - entry, err := v.backend.Get(ctx, v.expandKey(key)) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - entry.Key = v.truncateKey(entry.Key) - - return &Entry{ - Key: entry.Key, - Value: entry.Value, - }, nil -} - -// Put the entry into the prefix view -func (v *View) Put(ctx context.Context, entry *Entry) error { - if err := v.sanityCheck(entry.Key); err != nil { - return err - } - - nested := &Entry{ - Key: v.expandKey(entry.Key), - Value: entry.Value, - } - return v.backend.Put(ctx, nested) -} - -// Delete the entry from the prefix view -func (v *View) Delete(ctx context.Context, key string) error { - if err := v.sanityCheck(key); err != nil { - return err - } - return v.backend.Delete(ctx, v.expandKey(key)) -} - -// sanityCheck is used to perform a sanity check on a key -func (v *View) sanityCheck(key string) error { - if strings.Contains(key, "..") { - return ErrRelativePath - } - return nil -} - -// expandKey is used to expand to the full key path with the prefix -func (v *View) expandKey(suffix string) string { - return v.prefix + suffix -} - -// truncateKey is used to remove the prefix of the key -func (v *View) truncateKey(full string) string { - return strings.TrimPrefix(full, v.prefix) -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go deleted file mode 100644 index 6e0ddfcc..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go +++ /dev/null @@ -1,497 +0,0 @@ -package physical - -import ( - "context" - "reflect" - "sort" - "testing" - "time" -) - -func ExerciseBackend(t testing.TB, b Backend) { - t.Helper() - - // Should be empty - keys, err := b.List(context.Background(), "") - if err != nil { - t.Fatalf("initial list failed: %v", err) - } - if len(keys) != 0 { - t.Errorf("initial not empty: %v", keys) - } - - // Delete should work if it does not exist - err = b.Delete(context.Background(), "foo") - if err != nil { - t.Fatalf("idempotent delete: %v", err) - } - - // Get should not fail, but be nil - out, err := b.Get(context.Background(), "foo") - if err != nil { - t.Fatalf("initial get failed: %v", err) - } - if out != nil { - t.Errorf("initial get was not nil: %v", out) - } - - // Make an entry - e := &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("put failed: %v", err) - } - - // Get should work - out, err = b.Get(context.Background(), "foo") - if err != nil { - t.Fatalf("get failed: %v", err) - } - if !reflect.DeepEqual(out, e) { - t.Errorf("bad: %v expected: %v", out, e) - } - - // List should not be empty - keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("list failed: %v", err) - } - if len(keys) != 1 || keys[0] != "foo" { - t.Errorf("keys[0] did not equal foo: %v", keys) - } - - // Delete should work - err = b.Delete(context.Background(), "foo") - if err != nil { - t.Fatalf("delete: %v", err) - } - - // Should be empty - 
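A sketch of the prefixed View above: keys used through the view are transparently expanded with the prefix, and the prefix is stripped again on the way out (the "logical/" prefix is arbitrary).

    package example

    import (
        "context"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func viewExample() (*physical.Entry, error) {
        backend, err := inmem.NewInmem(nil, log.NewNullLogger())
        if err != nil {
            return nil, err
        }

        // All keys used through this view are prefixed with "logical/".
        view := physical.NewView(backend, "logical/")

        if err := view.Put(context.Background(), &physical.Entry{Key: "foo", Value: []byte("bar")}); err != nil {
            return nil, err
        }

        // The underlying backend actually holds "logical/foo".
        return backend.Get(context.Background(), "logical/foo")
    }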
keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("list after delete: %v", err) - } - if len(keys) != 0 { - t.Errorf("list after delete not empty: %v", keys) - } - - // Get should fail - out, err = b.Get(context.Background(), "foo") - if err != nil { - t.Fatalf("get after delete: %v", err) - } - if out != nil { - t.Errorf("get after delete not nil: %v", out) - } - - // Multiple Puts should work; GH-189 - e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("multi put 1 failed: %v", err) - } - e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("multi put 2 failed: %v", err) - } - - // Make a nested entry - e = &Entry{Key: "foo/bar", Value: []byte("baz")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("nested put failed: %v", err) - } - - // Get should work - out, err = b.Get(context.Background(), "foo/bar") - if err != nil { - t.Fatalf("get failed: %v", err) - } - if !reflect.DeepEqual(out, e) { - t.Errorf("bad: %v expected: %v", out, e) - } - - keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("list multi failed: %v", err) - } - sort.Strings(keys) - if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { - t.Errorf("expected 2 keys [foo, foo/]: %v", keys) - } - - // Delete with children should work - err = b.Delete(context.Background(), "foo") - if err != nil { - t.Fatalf("delete after multi: %v", err) - } - - // Get should return the child - out, err = b.Get(context.Background(), "foo/bar") - if err != nil { - t.Fatalf("get after multi delete: %v", err) - } - if out == nil { - t.Errorf("get after multi delete not nil: %v", out) - } - - // Removal of nested secret should not leave artifacts - e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("deep nest: %v", err) - } - - err = b.Delete(context.Background(), "foo/nested1/nested2/nested3") - if err != nil { - t.Fatalf("failed to remove deep nest: %v", err) - } - - keys, err = b.List(context.Background(), "foo/") - if err != nil { - t.Fatalf("err: %v", err) - } - if len(keys) != 1 || keys[0] != "bar" { - t.Errorf("should be exactly 1 key == bar: %v", keys) - } - - // Make a second nested entry to test prefix removal - e = &Entry{Key: "foo/zip", Value: []byte("zap")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("failed to create second nested: %v", err) - } - - // Delete should not remove the prefix - err = b.Delete(context.Background(), "foo/bar") - if err != nil { - t.Fatalf("failed to delete nested prefix: %v", err) - } - - keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("list nested prefix: %v", err) - } - if len(keys) != 1 || keys[0] != "foo/" { - t.Errorf("should be exactly 1 key == foo/: %v", keys) - } - - // Delete should remove the prefix - err = b.Delete(context.Background(), "foo/zip") - if err != nil { - t.Fatalf("failed to delete second prefix: %v", err) - } - - keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("listing after second delete failed: %v", err) - } - if len(keys) != 0 { - t.Errorf("should be empty at end: %v", keys) - } - - // When the root path is empty, adding and removing deep nested values should not break listing - e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("deep nest: 
%v", err) - } - - e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")} - err = b.Put(context.Background(), e) - if err != nil { - t.Fatalf("deep nest: %v", err) - } - - err = b.Delete(context.Background(), "foo/nested1/nested2/value2") - if err != nil { - t.Fatalf("failed to remove deep nest: %v", err) - } - - keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("listing of root failed after deletion: %v", err) - } - if len(keys) == 0 { - t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys) - keys, err = b.List(context.Background(), "foo/nested1") - if err != nil { - t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err) - } - // prove that the root should not be empty and that foo/nested1 exists - if len(keys) != 0 { - t.Logf(" keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys) - } - } - - // cleanup left over listing bug test value - err = b.Delete(context.Background(), "foo/nested1/nested2/value1") - if err != nil { - t.Fatalf("failed to remove deep nest: %v", err) - } - - keys, err = b.List(context.Background(), "") - if err != nil { - t.Fatalf("listing of root failed after delete of deep nest: %v", err) - } - if len(keys) != 0 { - t.Errorf("should be empty at end: %v", keys) - } -} - -func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { - t.Helper() - - e1 := &Entry{Key: "foo", Value: []byte("test")} - e2 := &Entry{Key: "foo/bar", Value: []byte("test")} - e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} - - defer func() { - b.Delete(context.Background(), "foo") - b.Delete(context.Background(), "foo/bar") - b.Delete(context.Background(), "foo/bar/baz") - }() - - err := b.Put(context.Background(), e1) - if err != nil { - t.Fatalf("failed to put entry 1: %v", err) - } - err = b.Put(context.Background(), e2) - if err != nil { - t.Fatalf("failed to put entry 2: %v", err) - } - err = b.Put(context.Background(), e3) - if err != nil { - t.Fatalf("failed to put entry 3: %v", err) - } - - // Scan the root - keys, err := b.List(context.Background(), "") - if err != nil { - t.Fatalf("list root: %v", err) - } - sort.Strings(keys) - if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { - t.Errorf("root expected [foo foo/]: %v", keys) - } - - // Scan foo/ - keys, err = b.List(context.Background(), "foo/") - if err != nil { - t.Fatalf("list level 1: %v", err) - } - sort.Strings(keys) - if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" { - t.Errorf("level 1 expected [bar bar/]: %v", keys) - } - - // Scan foo/bar/ - keys, err = b.List(context.Background(), "foo/bar/") - if err != nil { - t.Fatalf("list level 2: %v", err) - } - sort.Strings(keys) - if len(keys) != 1 || keys[0] != "baz" { - t.Errorf("level 1 expected [baz]: %v", keys) - } -} - -func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { - t.Helper() - - // Get the lock - lock, err := b.LockWith("foo", "bar") - if err != nil { - t.Fatalf("initial lock: %v", err) - } - - // Attempt to lock - leaderCh, err := lock.Lock(nil) - if err != nil { - t.Fatalf("lock attempt 1: %v", err) - } - if leaderCh == nil { - t.Fatalf("missing leaderCh") - } - - // Check the value - held, val, err := lock.Value() - if err != nil { - t.Fatalf("err: %v", err) - } - if !held { - t.Errorf("should be held") - } - if val != "bar" { - t.Errorf("expected value bar: %v", err) - } - - // Second acquisition should fail - lock2, err := b2.LockWith("foo", "baz") - if err != nil { - t.Fatalf("lock 2: 
%v", err) - } - - // Cancel attempt in 50 msec - stopCh := make(chan struct{}) - time.AfterFunc(50*time.Millisecond, func() { - close(stopCh) - }) - - // Attempt to lock - leaderCh2, err := lock2.Lock(stopCh) - if err != nil { - t.Fatalf("stop lock 2: %v", err) - } - if leaderCh2 != nil { - t.Errorf("should not have gotten leaderCh: %v", leaderCh2) - } - - // Release the first lock - lock.Unlock() - - // Attempt to lock should work - leaderCh2, err = lock2.Lock(nil) - if err != nil { - t.Fatalf("lock 2 lock: %v", err) - } - if leaderCh2 == nil { - t.Errorf("should get leaderCh") - } - - // Check the value - held, val, err = lock2.Value() - if err != nil { - t.Fatalf("value: %v", err) - } - if !held { - t.Errorf("should still be held") - } - if val != "baz" { - t.Errorf("expected: baz, got: %v", val) - } - - // Cleanup - lock2.Unlock() -} - -func ExerciseTransactionalBackend(t testing.TB, b Backend) { - t.Helper() - tb, ok := b.(Transactional) - if !ok { - t.Fatal("Not a transactional backend") - } - - txns := SetupTestingTransactions(t, b) - - if err := tb.Transaction(context.Background(), txns); err != nil { - t.Fatal(err) - } - - keys, err := b.List(context.Background(), "") - if err != nil { - t.Fatal(err) - } - - expected := []string{"foo", "zip"} - - sort.Strings(keys) - sort.Strings(expected) - if !reflect.DeepEqual(keys, expected) { - t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) - } - - entry, err := b.Get(context.Background(), "foo") - if err != nil { - t.Fatal(err) - } - if entry == nil { - t.Fatal("got nil entry") - } - if entry.Value == nil { - t.Fatal("got nil value") - } - if string(entry.Value) != "bar3" { - t.Fatal("updates did not apply correctly") - } - - entry, err = b.Get(context.Background(), "zip") - if err != nil { - t.Fatal(err) - } - if entry == nil { - t.Fatal("got nil entry") - } - if entry.Value == nil { - t.Fatal("got nil value") - } - if string(entry.Value) != "zap3" { - t.Fatal("updates did not apply correctly") - } -} - -func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { - t.Helper() - // Add a few keys so that we test rollback with deletion - if err := b.Put(context.Background(), &Entry{ - Key: "foo", - Value: []byte("bar"), - }); err != nil { - t.Fatal(err) - } - if err := b.Put(context.Background(), &Entry{ - Key: "zip", - Value: []byte("zap"), - }); err != nil { - t.Fatal(err) - } - if err := b.Put(context.Background(), &Entry{ - Key: "deleteme", - }); err != nil { - t.Fatal(err) - } - if err := b.Put(context.Background(), &Entry{ - Key: "deleteme2", - }); err != nil { - t.Fatal(err) - } - - txns := []*TxnEntry{ - { - Operation: PutOperation, - Entry: &Entry{ - Key: "foo", - Value: []byte("bar2"), - }, - }, - { - Operation: DeleteOperation, - Entry: &Entry{ - Key: "deleteme", - }, - }, - { - Operation: PutOperation, - Entry: &Entry{ - Key: "foo", - Value: []byte("bar3"), - }, - }, - { - Operation: DeleteOperation, - Entry: &Entry{ - Key: "deleteme2", - }, - }, - { - Operation: PutOperation, - Entry: &Entry{ - Key: "zip", - Value: []byte("zap3"), - }, - }, - } - - return txns -} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go deleted file mode 100644 index a943c6bd..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go +++ /dev/null @@ -1,150 +0,0 @@ -package physical - -import ( - "context" - "fmt" - - "github.com/hashicorp/go-multierror" -) - -// TxnEntry is an operation that takes atomically as part of 
-// a transactional update. Only supported by Transactional backends. -type TxnEntry struct { - Operation Operation - Entry *Entry -} - -func (t *TxnEntry) String() string { - return fmt.Sprintf("Operation: %s. Entry: %s", t.Operation, t.Entry) -} - -// Transactional is an optional interface for backends that -// support doing transactional updates of multiple keys. This is -// required for some features such as replication. -type Transactional interface { - // The function to run a transaction - Transaction(context.Context, []*TxnEntry) error -} - -type TransactionalBackend interface { - Backend - Transactional -} - -type PseudoTransactional interface { - // An internal function should do no locking or permit pool acquisition. - // Depending on the backend and if it natively supports transactions, these - // may simply chain to the normal backend functions. - GetInternal(context.Context, string) (*Entry, error) - PutInternal(context.Context, *Entry) error - DeleteInternal(context.Context, string) error -} - -// Implements the transaction interface -func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) { - rollbackStack := make([]*TxnEntry, 0, len(txns)) - var dirty bool - - // Update all of our GET transaction entries, so we can populate existing values back at the wal layer. - for _, txn := range txns { - if txn.Operation == GetOperation { - entry, err := t.GetInternal(ctx, txn.Entry.Key) - if err != nil { - return err - } - if entry != nil { - txn.Entry.Value = entry.Value - } - } - } - - // We walk the transactions in order; each successful operation goes into a - // LIFO for rollback if we hit an error along the way -TxnWalk: - for _, txn := range txns { - switch txn.Operation { - case DeleteOperation: - entry, err := t.GetInternal(ctx, txn.Entry.Key) - if err != nil { - retErr = multierror.Append(retErr, err) - dirty = true - break TxnWalk - } - if entry == nil { - // Nothing to delete or roll back - continue - } - rollbackEntry := &TxnEntry{ - Operation: PutOperation, - Entry: &Entry{ - Key: entry.Key, - Value: entry.Value, - }, - } - err = t.DeleteInternal(ctx, txn.Entry.Key) - if err != nil { - retErr = multierror.Append(retErr, err) - dirty = true - break TxnWalk - } - rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...) - - case PutOperation: - entry, err := t.GetInternal(ctx, txn.Entry.Key) - if err != nil { - retErr = multierror.Append(retErr, err) - dirty = true - break TxnWalk - } - - // Nothing existed so in fact rolling back requires a delete - var rollbackEntry *TxnEntry - if entry == nil { - rollbackEntry = &TxnEntry{ - Operation: DeleteOperation, - Entry: &Entry{ - Key: txn.Entry.Key, - }, - } - } else { - rollbackEntry = &TxnEntry{ - Operation: PutOperation, - Entry: &Entry{ - Key: entry.Key, - Value: entry.Value, - }, - } - } - - err = t.PutInternal(ctx, txn.Entry) - if err != nil { - retErr = multierror.Append(retErr, err) - dirty = true - break TxnWalk - } - rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...) 
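A sketch of submitting a batch through the Transactional interface defined above, using the transactional in-memory backend; the keys and values are illustrative:

    package example

    import (
        "context"

        log "github.com/hashicorp/go-hclog"
        "github.com/hashicorp/vault/sdk/physical"
        "github.com/hashicorp/vault/sdk/physical/inmem"
    )

    func transactionExample() error {
        backend, err := inmem.NewTransactionalInmem(nil, log.NewNullLogger())
        if err != nil {
            return err
        }
        txn := backend.(physical.Transactional)

        // Apply a put and a delete atomically; on failure the handler rolls
        // back whichever operations already succeeded.
        return txn.Transaction(context.Background(), []*physical.TxnEntry{
            {
                Operation: physical.PutOperation,
                Entry:     &physical.Entry{Key: "foo", Value: []byte("bar")},
            },
            {
                Operation: physical.DeleteOperation,
                Entry:     &physical.Entry{Key: "stale"},
            },
        })
    }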
- } - } - - // Need to roll back because we hit an error along the way - if dirty { - // While traversing this, if we get an error, we continue anyways in - // best-effort fashion - for _, txn := range rollbackStack { - switch txn.Operation { - case DeleteOperation: - err := t.DeleteInternal(ctx, txn.Entry.Key) - if err != nil { - retErr = multierror.Append(retErr, err) - } - case PutOperation: - err := t.PutInternal(ctx, txn.Entry) - if err != nil { - retErr = multierror.Append(retErr, err) - } - } - } - } - - return -} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/cgo.go b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go deleted file mode 100644 index 5bc93e5b..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/version/cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build cgo - -package version - -func init() { - CgoEnabled = true -} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version.go b/vendor/github.com/hashicorp/vault/sdk/version/version.go deleted file mode 100644 index 78b8eb82..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/version/version.go +++ /dev/null @@ -1,80 +0,0 @@ -package version - -import ( - "bytes" - "fmt" -) - -// VersionInfo -type VersionInfo struct { - Revision string `json:"revision,omitempty"` - Version string `json:"version,omitempty"` - VersionPrerelease string `json:"version_prerelease,omitempty"` - VersionMetadata string `json:"version_metadata,omitempty"` - BuildDate string `json:"build_date,omitempty"` -} - -func GetVersion() *VersionInfo { - ver := Version - rel := VersionPrerelease - md := VersionMetadata - if GitDescribe != "" { - ver = GitDescribe - } - if GitDescribe == "" && rel == "" && VersionPrerelease != "" { - rel = "dev" - } - - return &VersionInfo{ - Revision: GitCommit, - Version: ver, - VersionPrerelease: rel, - VersionMetadata: md, - BuildDate: BuildDate, - } -} - -func (c *VersionInfo) VersionNumber() string { - if Version == "unknown" && VersionPrerelease == "unknown" { - return "(version unknown)" - } - - version := c.Version - - if c.VersionPrerelease != "" { - version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) - } - - if c.VersionMetadata != "" { - version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) - } - - return version -} - -func (c *VersionInfo) FullVersionNumber(rev bool) string { - var versionString bytes.Buffer - - if Version == "unknown" && VersionPrerelease == "unknown" { - return "Vault (version unknown)" - } - - fmt.Fprintf(&versionString, "Vault v%s", c.Version) - if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) - } - - if c.VersionMetadata != "" { - fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) - } - - if rev && c.Revision != "" { - fmt.Fprintf(&versionString, " (%s)", c.Revision) - } - - if c.BuildDate != "" { - fmt.Fprintf(&versionString, ", built %s", c.BuildDate) - } - - return versionString.String() -} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go deleted file mode 100644 index e45626e2..00000000 --- a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -var ( - // The git commit that was compiled. This will be filled in by the compiler. - GitCommit string - GitDescribe string - - // The compilation date. This will be filled in by the compiler. 
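For completeness on the sdk/version files removed here, a short sketch of how the version metadata is typically surfaced; the exact output depends on the build-time variables stamped into the package:

    package example

    import (
        "fmt"

        "github.com/hashicorp/vault/sdk/version"
    )

    func printVersion() {
        info := version.GetVersion()

        // e.g. "1.13.0-dev1", depending on the build-time variables.
        fmt.Println(info.VersionNumber())

        // Includes the git revision and build date when they were stamped in.
        fmt.Println(info.FullVersionNumber(true))
    }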
- BuildDate string - - // Whether cgo is enabled or not; set at build time - CgoEnabled bool - - Version = "1.13.0" - VersionPrerelease = "dev1" - VersionMetadata = "" -) diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index 046a3d30..38fe3ed1 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -2,6 +2,7 @@ package yamux import ( "bufio" + "bytes" "fmt" "io" "io/ioutil" @@ -63,23 +64,26 @@ type Session struct { // sendCh is used to mark a stream as ready to send, // or to send a header out directly. - sendCh chan sendReady + sendCh chan *sendReady // recvDoneCh is closed when recv() exits to avoid a race // between stream registration and stream shutdown recvDoneCh chan struct{} + sendDoneCh chan struct{} // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex } // sendReady is used to either mark a stream as ready // or to directly send a header type sendReady struct { Hdr []byte + mu sync.Mutex // Protects Body from unsafe reads. Body []byte Err chan error } @@ -101,8 +105,9 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { inflight: make(map[uint32]struct{}), synCh: make(chan struct{}, config.AcceptBacklog), acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan sendReady, 64), + sendCh: make(chan *sendReady, 64), recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), shutdownCh: make(chan struct{}), } if client { @@ -255,10 +260,15 @@ func (s *Session) Close() error { return nil } s.shutdown = true + + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = ErrSessionShutdown } + s.shutdownErrLock.Unlock() + close(s.shutdownCh) + s.conn.Close() <-s.recvDoneCh @@ -267,17 +277,18 @@ func (s *Session) Close() error { for _, stream := range s.streams { stream.forceClose() } + <-s.sendDoneCh return nil } // exitErr is used to handle an error that is causing the // session to terminate. func (s *Session) exitErr(err error) { - s.shutdownLock.Lock() + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = err } - s.shutdownLock.Unlock() + s.shutdownErrLock.Unlock() s.Close() } @@ -373,7 +384,7 @@ func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) erro timerPool.Put(t) }() - ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} select { case s.sendCh <- ready: case <-s.shutdownCh: @@ -382,12 +393,34 @@ func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) erro return ErrConnectionWriteTimeout } + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. + } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. + ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. 
+ } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + select { case err := <-errCh: return err case <-s.shutdownCh: + bodyCopy() return ErrSessionShutdown case <-timer.C: + bodyCopy() return ErrConnectionWriteTimeout } } @@ -409,7 +442,7 @@ func (s *Session) sendNoWait(hdr header) error { }() select { - case s.sendCh <- sendReady{Hdr: hdr}: + case s.sendCh <- &sendReady{Hdr: hdr}: return nil case <-s.shutdownCh: return ErrSessionShutdown @@ -420,39 +453,59 @@ func (s *Session) sendNoWait(hdr header) error { // send is a long running goroutine that sends data func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer for { + bodyBuf.Reset() + select { case ready := <-s.sendCh: // Send a header if ready if ready.Hdr != nil { - sent := 0 - for sent < len(ready.Hdr) { - n, err := s.conn.Write(ready.Hdr[sent:]) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - s.exitErr(err) - return - } - sent += n + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err } } - // Send data from a body if given + ready.mu.Lock() if ready.Body != nil { - _, err := s.conn.Write(ready.Body) + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. + _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) if err != nil { s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) asyncSendErr(ready.Err, err) - s.exitErr(err) - return + return err } } // No error, successful send asyncSendErr(ready.Err, nil) case <-s.shutdownCh: - return + return nil } } } @@ -639,8 +692,9 @@ func (s *Session) incomingStream(id uint32) error { // Backlog exceeded! RST the stream s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") delete(s.streams, id) - stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(stream.sendHdr) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) } } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index f444bdc3..23d08fcc 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -2,6 +2,7 @@ package yamux import ( "bytes" + "errors" "io" "sync" "sync/atomic" @@ -127,6 +128,9 @@ START: // Send a window update potentially err = s.sendWindowUpdate() + if err == ErrSessionShutdown { + err = nil + } return n, err WAIT: @@ -200,6 +204,10 @@ START: // Send the header s.sendHdr.encode(typeData, flags, s.id, max) if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+ s.sendHdr = header(make([]byte, headerSize)) + } return 0, err } @@ -273,6 +281,10 @@ func (s *Stream) sendWindowUpdate() error { // Send the header s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -287,6 +299,10 @@ func (s *Stream) sendClose() error { flags |= flagFIN s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -362,8 +378,9 @@ func (s *Stream) closeTimeout() { // Send a RST so the remote side closes too. s.sendLock.Lock() defer s.sendLock.Unlock() - s.sendHdr.encode(typeWindowUpdate, flagRST, s.id, 0) - s.session.sendNoWait(s.sendHdr) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(hdr) } // forceClose is used for when the session is exiting @@ -465,6 +482,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { if length > s.recvWindow { s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + s.recvLock.Unlock() return ErrRecvWindowExceeded } @@ -473,14 +491,15 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { // This way we can read in the whole packet without further allocations. s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) } - if _, err := io.Copy(s.recvBuf, conn); err != nil { + copiedLength, err := io.Copy(s.recvBuf, conn) + if err != nil { s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) s.recvLock.Unlock() return err } // Decrement the receive window - s.recvWindow -= length + s.recvWindow -= uint32(copiedLength) s.recvLock.Unlock() // Unblock any readers diff --git a/vendor/github.com/ishidawataru/sctp/.gitignore b/vendor/github.com/ishidawataru/sctp/.gitignore deleted file mode 100644 index cf2d826c..00000000 --- a/vendor/github.com/ishidawataru/sctp/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -example/example diff --git a/vendor/github.com/ishidawataru/sctp/.travis.yml b/vendor/github.com/ishidawataru/sctp/.travis.yml deleted file mode 100644 index 01a76be9..00000000 --- a/vendor/github.com/ishidawataru/sctp/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - -script: - - go test -v -race ./... - - GOOS=linux GOARCH=amd64 go build . - - GOOS=linux GOARCH=arm go build . - - GOOS=linux GOARCH=arm64 go build . - - GOOS=linux GOARCH=ppc64le go build . - - (go version | grep go1.6 > /dev/null) || GOOS=linux GOARCH=s390x go build . -# can be compiled but not functional: - - GOOS=linux GOARCH=386 go build . - - GOOS=windows GOARCH=amd64 go build . 
diff --git a/vendor/github.com/ishidawataru/sctp/GO_LICENSE b/vendor/github.com/ishidawataru/sctp/GO_LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/github.com/ishidawataru/sctp/GO_LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ishidawataru/sctp/NOTICE b/vendor/github.com/ishidawataru/sctp/NOTICE deleted file mode 100644 index cfb675fd..00000000 --- a/vendor/github.com/ishidawataru/sctp/NOTICE +++ /dev/null @@ -1,3 +0,0 @@ -This source code includes following third party code - -- ipsock_linux.go : licensed by the Go authors, see GO_LICENSE file for the license which applies to the code diff --git a/vendor/github.com/ishidawataru/sctp/README.md b/vendor/github.com/ishidawataru/sctp/README.md deleted file mode 100644 index 574ececa..00000000 --- a/vendor/github.com/ishidawataru/sctp/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Stream Control Transmission Protocol (SCTP) ----- - -[![Build Status](https://travis-ci.org/ishidawataru/sctp.svg?branch=master)](https://travis-ci.org/ishidawataru/sctp/builds) - -Examples ----- - -See `example/sctp.go` - -```go -$ cd example -$ go build -$ # run example SCTP server -$ ./example -server -port 1000 -ip 10.10.0.1,10.20.0.1 -$ # run example SCTP client -$ ./example -port 1000 -ip 10.10.0.1,10.20.0.1 -``` diff --git a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go b/vendor/github.com/ishidawataru/sctp/ipsock_linux.go deleted file mode 100644 index 3df30fa4..00000000 --- a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the GO_LICENSE file. - -package sctp - -import ( - "net" - "os" - "sync" - "syscall" -) - -//from https://github.com/golang/go -// Boolean to int. 
-func boolint(b bool) int { - if b { - return 1 - } - return 0 -} - -//from https://github.com/golang/go -func ipToSockaddr(family int, ip net.IP, port int, zone string) (syscall.Sockaddr, error) { - switch family { - case syscall.AF_INET: - if len(ip) == 0 { - ip = net.IPv4zero - } - ip4 := ip.To4() - if ip4 == nil { - return nil, &net.AddrError{Err: "non-IPv4 address", Addr: ip.String()} - } - sa := &syscall.SockaddrInet4{Port: port} - copy(sa.Addr[:], ip4) - return sa, nil - case syscall.AF_INET6: - // In general, an IP wildcard address, which is either - // "0.0.0.0" or "::", means the entire IP addressing - // space. For some historical reason, it is used to - // specify "any available address" on some operations - // of IP node. - // - // When the IP node supports IPv4-mapped IPv6 address, - // we allow an listener to listen to the wildcard - // address of both IP addressing spaces by specifying - // IPv6 wildcard address. - if len(ip) == 0 || ip.Equal(net.IPv4zero) { - ip = net.IPv6zero - } - // We accept any IPv6 address including IPv4-mapped - // IPv6 address. - ip6 := ip.To16() - if ip6 == nil { - return nil, &net.AddrError{Err: "non-IPv6 address", Addr: ip.String()} - } - //we set ZoneId to 0, as currently we use this functon only to probe the IP capabilities of the host - //if real Zone handling is required, the zone cache implementation in golang/net should be pulled here - sa := &syscall.SockaddrInet6{Port: port, ZoneId: 0} - copy(sa.Addr[:], ip6) - return sa, nil - } - return nil, &net.AddrError{Err: "invalid address family", Addr: ip.String()} -} - -//from https://github.com/golang/go -func sockaddr(a *net.TCPAddr, family int) (syscall.Sockaddr, error) { - if a == nil { - return nil, nil - } - return ipToSockaddr(family, a.IP, a.Port, a.Zone) -} - -//from https://github.com/golang/go -type ipStackCapabilities struct { - sync.Once // guards following - ipv4Enabled bool - ipv6Enabled bool - ipv4MappedIPv6Enabled bool -} - -//from https://github.com/golang/go -var ipStackCaps ipStackCapabilities - -//from https://github.com/golang/go -// supportsIPv4 reports whether the platform supports IPv4 networking -// functionality. -func supportsIPv4() bool { - ipStackCaps.Once.Do(ipStackCaps.probe) - return ipStackCaps.ipv4Enabled -} - -//from https://github.com/golang/go -// supportsIPv6 reports whether the platform supports IPv6 networking -// functionality. -func supportsIPv6() bool { - ipStackCaps.Once.Do(ipStackCaps.probe) - return ipStackCaps.ipv6Enabled -} - -//from https://github.com/golang/go -// supportsIPv4map reports whether the platform supports mapping an -// IPv4 address inside an IPv6 address at transport layer -// protocols. See RFC 4291, RFC 4038 and RFC 3493. -func supportsIPv4map() bool { - ipStackCaps.Once.Do(ipStackCaps.probe) - return ipStackCaps.ipv4MappedIPv6Enabled -} - -//from https://github.com/golang/go -// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication -// capabilities which are controlled by the IPV6_V6ONLY socket option -// and kernel configuration. -// -// Should we try to use the IPv4 socket interface if we're only -// dealing with IPv4 sockets? As long as the host system understands -// IPv4-mapped IPv6, it's okay to pass IPv4-mapeed IPv6 addresses to -// the IPv6 interface. That simplifies our code and is most -// general. Unfortunately, we need to run on kernels built without -// IPv6 support too. So probe the kernel to figure it out. 
-func (p *ipStackCapabilities) probe() { - s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) - switch err { - case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT: - case nil: - syscall.Close(s) - p.ipv4Enabled = true - } - var probes = []struct { - laddr net.TCPAddr - value int - }{ - // IPv6 communication capability - {laddr: net.TCPAddr{IP: net.IPv6loopback}, value: 1}, - // IPv4-mapped IPv6 address communication capability - {laddr: net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)}, value: 0}, - } - - for i := range probes { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) - if err != nil { - continue - } - defer syscall.Close(s) - syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value) - sa, err := sockaddr(&(probes[i].laddr), syscall.AF_INET6) - if err != nil { - continue - } - if err := syscall.Bind(s, sa); err != nil { - continue - } - if i == 0 { - p.ipv6Enabled = true - } else { - p.ipv4MappedIPv6Enabled = true - } - } -} - -//from https://github.com/golang/go -//Change: we check the first IP address in the list of candidate SCTP IP addresses -func (a *SCTPAddr) isWildcard() bool { - if a == nil { - return true - } - if 0 == len(a.IPAddrs) { - return true - } - - return a.IPAddrs[0].IP.IsUnspecified() -} - -func (a *SCTPAddr) family() int { - if a != nil { - for _, ip := range a.IPAddrs { - if ip.IP.To4() == nil { - return syscall.AF_INET6 - } - } - } - return syscall.AF_INET -} - -//from https://github.com/golang/go -func favoriteAddrFamily(network string, laddr *SCTPAddr, raddr *SCTPAddr, mode string) (family int, ipv6only bool) { - switch network[len(network)-1] { - case '4': - return syscall.AF_INET, false - case '6': - return syscall.AF_INET6, true - } - - if mode == "listen" && (laddr == nil || laddr.isWildcard()) { - if supportsIPv4map() || !supportsIPv4() { - return syscall.AF_INET6, false - } - if laddr == nil { - return syscall.AF_INET, false - } - return laddr.family(), false - } - - if (laddr == nil || laddr.family() == syscall.AF_INET) && - (raddr == nil || raddr.family() == syscall.AF_INET) { - return syscall.AF_INET, false - } - return syscall.AF_INET6, false -} - -//from https://github.com/golang/go -//Changes: it is for SCTP only -func setDefaultSockopts(s int, family int, ipv6only bool) error { - if family == syscall.AF_INET6 { - // Allow both IP versions even if the OS default - // is otherwise. Note that some operating systems - // never admit this option. - syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) - } - // Allow broadcast. - return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) -} diff --git a/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/ishidawataru/sctp/sctp.go deleted file mode 100644 index 94842f42..00000000 --- a/vendor/github.com/ishidawataru/sctp/sctp.go +++ /dev/null @@ -1,729 +0,0 @@ -// Copyright 2019 Wataru Ishida. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package sctp - -import ( - "bytes" - "encoding/binary" - "fmt" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - "unsafe" -) - -const ( - SOL_SCTP = 132 - - SCTP_BINDX_ADD_ADDR = 0x01 - SCTP_BINDX_REM_ADDR = 0x02 - - MSG_NOTIFICATION = 0x8000 -) - -const ( - SCTP_RTOINFO = iota - SCTP_ASSOCINFO - SCTP_INITMSG - SCTP_NODELAY - SCTP_AUTOCLOSE - SCTP_SET_PEER_PRIMARY_ADDR - SCTP_PRIMARY_ADDR - SCTP_ADAPTATION_LAYER - SCTP_DISABLE_FRAGMENTS - SCTP_PEER_ADDR_PARAMS - SCTP_DEFAULT_SENT_PARAM - SCTP_EVENTS - SCTP_I_WANT_MAPPED_V4_ADDR - SCTP_MAXSEG - SCTP_STATUS - SCTP_GET_PEER_ADDR_INFO - SCTP_DELAYED_ACK_TIME - SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME - SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME - - SCTP_SOCKOPT_BINDX_ADD = 100 - SCTP_SOCKOPT_BINDX_REM = 101 - SCTP_SOCKOPT_PEELOFF = 102 - SCTP_GET_PEER_ADDRS = 108 - SCTP_GET_LOCAL_ADDRS = 109 - SCTP_SOCKOPT_CONNECTX = 110 - SCTP_SOCKOPT_CONNECTX3 = 111 -) - -const ( - SCTP_EVENT_DATA_IO = 1 << iota - SCTP_EVENT_ASSOCIATION - SCTP_EVENT_ADDRESS - SCTP_EVENT_SEND_FAILURE - SCTP_EVENT_PEER_ERROR - SCTP_EVENT_SHUTDOWN - SCTP_EVENT_PARTIAL_DELIVERY - SCTP_EVENT_ADAPTATION_LAYER - SCTP_EVENT_AUTHENTICATION - SCTP_EVENT_SENDER_DRY - - SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY -) - -type SCTPNotificationType int - -const ( - SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15)) - SCTP_ASSOC_CHANGE - SCTP_PEER_ADDR_CHANGE - SCTP_SEND_FAILED - SCTP_REMOTE_ERROR - SCTP_SHUTDOWN_EVENT - SCTP_PARTIAL_DELIVERY_EVENT - SCTP_ADAPTATION_INDICATION - SCTP_AUTHENTICATION_INDICATION - SCTP_SENDER_DRY_EVENT -) - -type NotificationHandler func([]byte) error - -type EventSubscribe struct { - DataIO uint8 - Association uint8 - Address uint8 - SendFailure uint8 - PeerError uint8 - Shutdown uint8 - PartialDelivery uint8 - AdaptationLayer uint8 - Authentication uint8 - SenderDry uint8 -} - -const ( - SCTP_CMSG_INIT = iota - SCTP_CMSG_SNDRCV - SCTP_CMSG_SNDINFO - SCTP_CMSG_RCVINFO - SCTP_CMSG_NXTINFO -) - -const ( - SCTP_UNORDERED = 1 << iota - SCTP_ADDR_OVER - SCTP_ABORT - SCTP_SACK_IMMEDIATELY - SCTP_EOF -) - -const ( - SCTP_MAX_STREAM = 0xffff -) - -type InitMsg struct { - NumOstreams uint16 - MaxInstreams uint16 - MaxAttempts uint16 - MaxInitTimeout uint16 -} - -type SndRcvInfo struct { - Stream uint16 - SSN uint16 - Flags uint16 - _ uint16 - PPID uint32 - Context uint32 - TTL uint32 - TSN uint32 - CumTSN uint32 - AssocID int32 -} - -type SndInfo struct { - SID uint16 - Flags uint16 - PPID uint32 - Context uint32 - AssocID int32 -} - -type GetAddrsOld struct { - AssocID int32 - AddrNum int32 - Addrs uintptr -} - -type NotificationHeader struct { - Type uint16 - Flags uint16 - Length uint32 -} - -type SCTPState uint16 - -const ( - SCTP_COMM_UP = SCTPState(iota) - SCTP_COMM_LOST - SCTP_RESTART - SCTP_SHUTDOWN_COMP - SCTP_CANT_STR_ASSOC -) - -var nativeEndian binary.ByteOrder -var sndRcvInfoSize uintptr - -func init() { - i := uint16(1) - if *(*byte)(unsafe.Pointer(&i)) == 0 { - nativeEndian = binary.BigEndian - } else { - nativeEndian = binary.LittleEndian - } - info := SndRcvInfo{} - sndRcvInfoSize = unsafe.Sizeof(info) -} - -func toBuf(v interface{}) []byte { - var buf bytes.Buffer - binary.Write(&buf, 
nativeEndian, v) - return buf.Bytes() -} - -func htons(h uint16) uint16 { - if nativeEndian == binary.LittleEndian { - return (h << 8 & 0xff00) | (h >> 8 & 0xff) - } - return h -} - -var ntohs = htons - -// setInitOpts sets options for an SCTP association initialization -// see https://tools.ietf.org/html/rfc4960#page-25 -func setInitOpts(fd int, options InitMsg) error { - optlen := unsafe.Sizeof(options) - _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(&options)), uintptr(optlen)) - return err -} - -func setNumOstreams(fd, num int) error { - return setInitOpts(fd, InitMsg{NumOstreams: uint16(num)}) -} - -type SCTPAddr struct { - IPAddrs []net.IPAddr - Port int -} - -func (a *SCTPAddr) ToRawSockAddrBuf() []byte { - p := htons(uint16(a.Port)) - if len(a.IPAddrs) == 0 { // if a.IPAddrs list is empty - fall back to IPv4 zero addr - s := syscall.RawSockaddrInet4{ - Family: syscall.AF_INET, - Port: p, - } - copy(s.Addr[:], net.IPv4zero) - return toBuf(s) - } - buf := []byte{} - for _, ip := range a.IPAddrs { - ipBytes := ip.IP - if len(ipBytes) == 0 { - ipBytes = net.IPv4zero - } - if ip4 := ipBytes.To4(); ip4 != nil { - s := syscall.RawSockaddrInet4{ - Family: syscall.AF_INET, - Port: p, - } - copy(s.Addr[:], ip4) - buf = append(buf, toBuf(s)...) - } else { - var scopeid uint32 - ifi, err := net.InterfaceByName(ip.Zone) - if err == nil { - scopeid = uint32(ifi.Index) - } - s := syscall.RawSockaddrInet6{ - Family: syscall.AF_INET6, - Port: p, - Scope_id: scopeid, - } - copy(s.Addr[:], ipBytes) - buf = append(buf, toBuf(s)...) - } - } - return buf -} - -func (a *SCTPAddr) String() string { - var b bytes.Buffer - - for n, i := range a.IPAddrs { - if i.IP.To4() != nil { - b.WriteString(i.String()) - } else if i.IP.To16() != nil { - b.WriteRune('[') - b.WriteString(i.String()) - b.WriteRune(']') - } - if n < len(a.IPAddrs)-1 { - b.WriteRune('/') - } - } - b.WriteRune(':') - b.WriteString(strconv.Itoa(a.Port)) - return b.String() -} - -func (a *SCTPAddr) Network() string { return "sctp" } - -func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) { - tcpnet := "" - switch network { - case "", "sctp": - tcpnet = "tcp" - case "sctp4": - tcpnet = "tcp4" - case "sctp6": - tcpnet = "tcp6" - default: - return nil, fmt.Errorf("invalid net: %s", network) - } - elems := strings.Split(addrs, "/") - if len(elems) == 0 { - return nil, fmt.Errorf("invalid input: %s", addrs) - } - ipaddrs := make([]net.IPAddr, 0, len(elems)) - for _, e := range elems[:len(elems)-1] { - tcpa, err := net.ResolveTCPAddr(tcpnet, e+":") - if err != nil { - return nil, err - } - ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone}) - } - tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1]) - if err != nil { - return nil, err - } - if tcpa.IP != nil { - ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone}) - } else { - ipaddrs = nil - } - return &SCTPAddr{ - IPAddrs: ipaddrs, - Port: tcpa.Port, - }, nil -} - -func SCTPConnect(fd int, addr *SCTPAddr) (int, error) { - buf := addr.ToRawSockAddrBuf() - param := GetAddrsOld{ - AddrNum: int32(len(buf)), - Addrs: uintptr(uintptr(unsafe.Pointer(&buf[0]))), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err == nil { - return int(param.AssocID), nil - } else if err != syscall.ENOPROTOOPT { - return 0, err - } - r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) - return 
int(r0), err -} - -func SCTPBind(fd int, addr *SCTPAddr, flags int) error { - var option uintptr - switch flags { - case SCTP_BINDX_ADD_ADDR: - option = SCTP_SOCKOPT_BINDX_ADD - case SCTP_BINDX_REM_ADDR: - option = SCTP_SOCKOPT_BINDX_REM - default: - return syscall.EINVAL - } - - buf := addr.ToRawSockAddrBuf() - _, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) - return err -} - -type SCTPConn struct { - _fd int32 - notificationHandler NotificationHandler -} - -func (c *SCTPConn) fd() int { - return int(atomic.LoadInt32(&c._fd)) -} - -func NewSCTPConn(fd int, handler NotificationHandler) *SCTPConn { - conn := &SCTPConn{ - _fd: int32(fd), - notificationHandler: handler, - } - return conn -} - -func (c *SCTPConn) Write(b []byte) (int, error) { - return c.SCTPWrite(b, nil) -} - -func (c *SCTPConn) Read(b []byte) (int, error) { - n, _, err := c.SCTPRead(b) - if n < 0 { - n = 0 - } - return n, err -} - -func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error { - return setInitOpts(c.fd(), InitMsg{ - NumOstreams: uint16(numOstreams), - MaxInstreams: uint16(maxInstreams), - MaxAttempts: uint16(maxAttempts), - MaxInitTimeout: uint16(maxInitTimeout), - }) -} - -func (c *SCTPConn) SubscribeEvents(flags int) error { - var d, a, ad, sf, p, sh, pa, ada, au, se uint8 - if flags&SCTP_EVENT_DATA_IO > 0 { - d = 1 - } - if flags&SCTP_EVENT_ASSOCIATION > 0 { - a = 1 - } - if flags&SCTP_EVENT_ADDRESS > 0 { - ad = 1 - } - if flags&SCTP_EVENT_SEND_FAILURE > 0 { - sf = 1 - } - if flags&SCTP_EVENT_PEER_ERROR > 0 { - p = 1 - } - if flags&SCTP_EVENT_SHUTDOWN > 0 { - sh = 1 - } - if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 { - pa = 1 - } - if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 { - ada = 1 - } - if flags&SCTP_EVENT_AUTHENTICATION > 0 { - au = 1 - } - if flags&SCTP_EVENT_SENDER_DRY > 0 { - se = 1 - } - param := EventSubscribe{ - DataIO: d, - Association: a, - Address: ad, - SendFailure: sf, - PeerError: p, - Shutdown: sh, - PartialDelivery: pa, - AdaptationLayer: ada, - Authentication: au, - SenderDry: se, - } - optlen := unsafe.Sizeof(param) - _, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(optlen)) - return err -} - -func (c *SCTPConn) SubscribedEvents() (int, error) { - param := EventSubscribe{} - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return 0, err - } - var flags int - if param.DataIO > 0 { - flags |= SCTP_EVENT_DATA_IO - } - if param.Association > 0 { - flags |= SCTP_EVENT_ASSOCIATION - } - if param.Address > 0 { - flags |= SCTP_EVENT_ADDRESS - } - if param.SendFailure > 0 { - flags |= SCTP_EVENT_SEND_FAILURE - } - if param.PeerError > 0 { - flags |= SCTP_EVENT_PEER_ERROR - } - if param.Shutdown > 0 { - flags |= SCTP_EVENT_SHUTDOWN - } - if param.PartialDelivery > 0 { - flags |= SCTP_EVENT_PARTIAL_DELIVERY - } - if param.AdaptationLayer > 0 { - flags |= SCTP_EVENT_ADAPTATION_LAYER - } - if param.Authentication > 0 { - flags |= SCTP_EVENT_AUTHENTICATION - } - if param.SenderDry > 0 { - flags |= SCTP_EVENT_SENDER_DRY - } - return flags, nil -} - -func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error { - optlen := unsafe.Sizeof(*info) - _, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen)) - return err -} - -func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) { - info := &SndRcvInfo{} - optlen := 
unsafe.Sizeof(*info) - _, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen))) - return info, err -} - -func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) { - addr := &SCTPAddr{ - IPAddrs: make([]net.IPAddr, n), - } - - switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family { - case syscall.AF_INET: - addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) - tmp := syscall.RawSockaddrInet4{} - size := unsafe.Sizeof(tmp) - for i := 0; i < n; i++ { - a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer( - uintptr(ptr) + size*uintptr(i))) - addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:]} - } - case syscall.AF_INET6: - addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) - tmp := syscall.RawSockaddrInet6{} - size := unsafe.Sizeof(tmp) - for i := 0; i < n; i++ { - a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer( - uintptr(ptr) + size*uintptr(i))) - var zone string - ifi, err := net.InterfaceByIndex(int(a.Scope_id)) - if err == nil { - zone = ifi.Name - } - addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:], Zone: zone} - } - default: - return nil, fmt.Errorf("unknown address family: %d", family) - } - return addr, nil -} - -func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) { - - type getaddrs struct { - assocId int32 - addrNum uint32 - addrs [4096]byte - } - param := getaddrs{ - assocId: int32(id), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return nil, err - } - return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), int(param.addrNum)) -} - -func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) { - - type sctpGetSetPrim struct { - assocId int32 - addrs [128]byte - } - param := sctpGetSetPrim{ - assocId: int32(0), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return nil, err - } - return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), 1) -} - -func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) { - return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS) -} - -func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) { - return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS) -} - -func (c *SCTPConn) LocalAddr() net.Addr { - addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS) - if err != nil { - return nil - } - return addr -} - -func (c *SCTPConn) RemoteAddr() net.Addr { - addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS) - if err != nil { - return nil - } - return addr -} - -func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) { - type peeloffArg struct { - assocId int32 - sd int - } - param := peeloffArg{ - assocId: int32(id), - } - optlen := unsafe.Sizeof(param) - _, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) - if err != nil { - return nil, err - } - return &SCTPConn{_fd: int32(param.sd)}, nil -} - -func (c *SCTPConn) SetDeadline(t time.Time) error { - return syscall.EOPNOTSUPP -} - -func (c *SCTPConn) SetReadDeadline(t time.Time) error { - return syscall.EOPNOTSUPP -} - -func (c *SCTPConn) SetWriteDeadline(t time.Time) error { - return syscall.EOPNOTSUPP -} - -type SCTPListener struct { - fd int - m sync.Mutex -} - -func (ln *SCTPListener) Addr() net.Addr { - laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS) - if 
err != nil { - return nil - } - return laddr -} - -type SCTPSndRcvInfoWrappedConn struct { - conn *SCTPConn -} - -func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn { - conn.SubscribeEvents(SCTP_EVENT_DATA_IO) - return &SCTPSndRcvInfoWrappedConn{conn} -} - -func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) { - if len(b) < int(sndRcvInfoSize) { - return 0, syscall.EINVAL - } - info := (*SndRcvInfo)(unsafe.Pointer(&b[0])) - n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info) - return n + int(sndRcvInfoSize), err -} - -func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) { - if len(b) < int(sndRcvInfoSize) { - return 0, syscall.EINVAL - } - n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:]) - if err != nil { - return n, err - } - copy(b, toBuf(info)) - return n + int(sndRcvInfoSize), err -} - -func (c *SCTPSndRcvInfoWrappedConn) Close() error { - return c.conn.Close() -} - -func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -func (c *SCTPSndRcvInfoWrappedConn) SetWriteBuffer(bytes int) error { - return c.conn.SetWriteBuffer(bytes) -} - -func (c *SCTPSndRcvInfoWrappedConn) GetWriteBuffer() (int, error) { - return c.conn.GetWriteBuffer() -} - -func (c *SCTPSndRcvInfoWrappedConn) SetReadBuffer(bytes int) error { - return c.conn.SetReadBuffer(bytes) -} - -func (c *SCTPSndRcvInfoWrappedConn) GetReadBuffer() (int, error) { - return c.conn.GetReadBuffer() -} - -// SocketConfig contains options for the SCTP socket. -type SocketConfig struct { - // If Control is not nil it is called after the socket is created but before - // it is bound or connected. - Control func(network, address string, c syscall.RawConn) error - - // InitMsg is the options to send in the initial SCTP message - InitMsg InitMsg -} - -func (cfg *SocketConfig) Listen(net string, laddr *SCTPAddr) (*SCTPListener, error) { - return listenSCTPExtConfig(net, laddr, cfg.InitMsg, cfg.Control) -} - -func (cfg *SocketConfig) Dial(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { - return dialSCTPExtConfig(net, laddr, raddr, cfg.InitMsg, cfg.Control) -} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go deleted file mode 100644 index ac340ddf..00000000 --- a/vendor/github.com/ishidawataru/sctp/sctp_linux.go +++ /dev/null @@ -1,305 +0,0 @@ -// +build linux,!386 -// Copyright 2019 Wataru Ishida. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sctp - -import ( - "io" - "net" - "sync/atomic" - "syscall" - "unsafe" -) - -func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386 - r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, - uintptr(fd), - SOL_SCTP, - optname, - optval, - optlen, - 0) - if errno != 0 { - return r0, r1, errno - } - return r0, r1, nil -} - -func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386 - r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, - uintptr(fd), - SOL_SCTP, - optname, - optval, - optlen, - 0) - if errno != 0 { - return r0, r1, errno - } - return r0, r1, nil -} - -type rawConn struct { - sockfd int -} - -func (r rawConn) Control(f func(fd uintptr)) error { - f(uintptr(r.sockfd)) - return nil -} - -func (r rawConn) Read(f func(fd uintptr) (done bool)) error { - panic("not implemented") -} - -func (r rawConn) Write(f func(fd uintptr) (done bool)) error { - panic("not implemented") -} - -func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { - var cbuf []byte - if info != nil { - cmsgBuf := toBuf(info) - hdr := &syscall.Cmsghdr{ - Level: syscall.IPPROTO_SCTP, - Type: SCTP_CMSG_SNDRCV, - } - - // bitwidth of hdr.Len is platform-specific, - // so we use hdr.SetLen() rather than directly setting hdr.Len - hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf))) - cbuf = append(toBuf(hdr), cmsgBuf...) - } - return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0) -} - -func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) { - msgs, err := syscall.ParseSocketControlMessage(b) - if err != nil { - return nil, err - } - for _, m := range msgs { - if m.Header.Level == syscall.IPPROTO_SCTP { - switch m.Header.Type { - case SCTP_CMSG_SNDRCV: - return (*SndRcvInfo)(unsafe.Pointer(&m.Data[0])), nil - } - } - } - return nil, nil -} - -func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { - oob := make([]byte, 254) - for { - n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0) - if err != nil { - return n, nil, err - } - - if n == 0 && oobn == 0 { - return 0, nil, io.EOF - } - - if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil { - if err := c.notificationHandler(b[:n]); err != nil { - return 0, nil, err - } - } else { - var info *SndRcvInfo - if oobn > 0 { - info, err = parseSndRcvInfo(oob[:oobn]) - } - return n, info, err - } - } -} - -func (c *SCTPConn) Close() error { - if c != nil { - fd := atomic.SwapInt32(&c._fd, -1) - if fd > 0 { - info := &SndRcvInfo{ - Flags: SCTP_EOF, - } - c.SCTPWrite(nil, info) - syscall.Shutdown(int(fd), syscall.SHUT_RDWR) - return syscall.Close(int(fd)) - } - } - return syscall.EBADF -} - -func (c *SCTPConn) SetWriteBuffer(bytes int) error { - return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes) -} - -func (c *SCTPConn) GetWriteBuffer() (int, error) { - return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF) -} - -func (c *SCTPConn) SetReadBuffer(bytes int) error { - return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes) -} - -func (c *SCTPConn) GetReadBuffer() (int, error) { - return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF) -} - -// ListenSCTP - start listener on specified address/port -func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { - return ListenSCTPExt(net, laddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) -} - -// ListenSCTPExt - 
start listener on specified address/port with given SCTP options -func ListenSCTPExt(network string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) { - return listenSCTPExtConfig(network, laddr, options, nil) -} - -// listenSCTPExtConfig - start listener on specified address/port with given SCTP options and socket configuration -func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) { - af, ipv6only := favoriteAddrFamily(network, laddr, nil, "listen") - sock, err := syscall.Socket( - af, - syscall.SOCK_STREAM, - syscall.IPPROTO_SCTP, - ) - if err != nil { - return nil, err - } - - // close socket on error - defer func() { - if err != nil { - syscall.Close(sock) - } - }() - if err = setDefaultSockopts(sock, af, ipv6only); err != nil { - return nil, err - } - if control != nil { - rc := rawConn{sockfd: sock} - if err = control(network, laddr.String(), rc); err != nil { - return nil, err - } - } - err = setInitOpts(sock, options) - if err != nil { - return nil, err - } - - if laddr != nil { - // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address - if len(laddr.IPAddrs) == 0 { - if af == syscall.AF_INET { - laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero}) - } else if af == syscall.AF_INET6 { - laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero}) - } - } - err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) - if err != nil { - return nil, err - } - } - err = syscall.Listen(sock, syscall.SOMAXCONN) - if err != nil { - return nil, err - } - return &SCTPListener{ - fd: sock, - }, nil -} - -// AcceptSCTP waits for and returns the next SCTP connection to the listener. -func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) { - fd, _, err := syscall.Accept4(ln.fd, 0) - return NewSCTPConn(fd, nil), err -} - -// Accept waits for and returns the next connection connection to the listener. 
-func (ln *SCTPListener) Accept() (net.Conn, error) { - return ln.AcceptSCTP() -} - -func (ln *SCTPListener) Close() error { - syscall.Shutdown(ln.fd, syscall.SHUT_RDWR) - return syscall.Close(ln.fd) -} - -// DialSCTP - bind socket to laddr (if given) and connect to raddr -func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { - return DialSCTPExt(net, laddr, raddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) -} - -// DialSCTPExt - same as DialSCTP but with given SCTP options -func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { - return dialSCTPExtConfig(network, laddr, raddr, options, nil) -} - -// dialSCTPExtConfig - same as DialSCTP but with given SCTP options and socket configuration -func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) { - af, ipv6only := favoriteAddrFamily(network, laddr, raddr, "dial") - sock, err := syscall.Socket( - af, - syscall.SOCK_STREAM, - syscall.IPPROTO_SCTP, - ) - if err != nil { - return nil, err - } - - // close socket on error - defer func() { - if err != nil { - syscall.Close(sock) - } - }() - if err = setDefaultSockopts(sock, af, ipv6only); err != nil { - return nil, err - } - if control != nil { - rc := rawConn{sockfd: sock} - if err = control(network, laddr.String(), rc); err != nil { - return nil, err - } - } - err = setInitOpts(sock, options) - if err != nil { - return nil, err - } - if laddr != nil { - // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address - if len(laddr.IPAddrs) == 0 { - if af == syscall.AF_INET { - laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero}) - } else if af == syscall.AF_INET6 { - laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero}) - } - } - err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) - if err != nil { - return nil, err - } - } - _, err = SCTPConnect(sock, raddr) - if err != nil { - return nil, err - } - return NewSCTPConn(sock, nil), nil -} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go deleted file mode 100644 index 118fe159..00000000 --- a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build !linux linux,386 -// Copyright 2019 Wataru Ishida. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sctp - -import ( - "errors" - "net" - "runtime" - "syscall" -) - -var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH) - -func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - return 0, 0, ErrUnsupported -} - -func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { - return 0, 0, ErrUnsupported -} - -func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { - return 0, ErrUnsupported -} - -func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { - return 0, nil, ErrUnsupported -} - -func (c *SCTPConn) Close() error { - return ErrUnsupported -} - -func (c *SCTPConn) SetWriteBuffer(bytes int) error { - return ErrUnsupported -} - -func (c *SCTPConn) GetWriteBuffer() (int, error) { - return 0, ErrUnsupported -} - -func (c *SCTPConn) SetReadBuffer(bytes int) error { - return ErrUnsupported -} - -func (c *SCTPConn) GetReadBuffer() (int, error) { - return 0, ErrUnsupported -} - -func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { - return nil, ErrUnsupported -} - -func ListenSCTPExt(net string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) { - return nil, ErrUnsupported -} - -func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) { - return nil, ErrUnsupported -} - -func (ln *SCTPListener) Accept() (net.Conn, error) { - return nil, ErrUnsupported -} - -func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) { - return nil, ErrUnsupported -} - -func (ln *SCTPListener) Close() error { - return ErrUnsupported -} - -func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { - return nil, ErrUnsupported -} - -func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { - return nil, ErrUnsupported -} - -func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000..40243359 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/gojuno/minimock/v3/.gitignore b/vendor/github.com/klauspost/compress/.gitignore similarity index 74% rename from vendor/github.com/gojuno/minimock/v3/.gitignore rename to vendor/github.com/klauspost/compress/.gitignore index 289978ec..d31b3781 100644 --- a/vendor/github.com/gojuno/minimock/v3/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -22,8 +22,11 @@ _testmain.go *.exe *.test *.prof +/s2/cmd/_s2sx/sfx-exe -/examples/sample_interface_mock.go -/cmd/minimock/minimock -/minimock -/dist +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 00000000..0af08e65 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@latest + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/klauspost/compress/LICENSE similarity index 67% rename from vendor/github.com/docker/libnetwork/LICENSE rename to vendor/github.com/klauspost/compress/LICENSE index e06d2081..87d55747 100644 --- a/vendor/github.com/docker/libnetwork/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -1,4 +1,37 @@ -Apache License +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +211,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +219,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright 2016-2017 The New York Times Company Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,3 +233,72 @@ Apache License See the License for the specific language governing permissions and limitations under the License. +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 00000000..3c00c1af --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,578 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
+* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * 
gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use two cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
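+For illustration, here is a minimal sketch of the synchronous mode described above, using the zstd package's `WithEncoderConcurrency` and `WithDecoderConcurrency` options to pin concurrency to 1. Treat it as an illustrative sketch rather than canonical usage; the input data and log output are arbitrary:
+
+```
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+func main() {
+	var compressed bytes.Buffer
+
+	// Encoder with concurrency 1 runs on the calling goroutine only.
+	enc, err := zstd.NewWriter(&compressed, zstd.WithEncoderConcurrency(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, err := enc.Write([]byte("hello, synchronous zstd")); err != nil {
+		log.Fatal(err)
+	}
+	if err := enc.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// Decoder with concurrency 1 likewise avoids spawning goroutines.
+	dec, err := zstd.NewReader(&compressed, zstd.WithDecoderConcurrency(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer dec.Close()
+
+	out, err := io.ReadAll(dec)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("decompressed: %s", out)
+}
+```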
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import | new import | Documentation
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. The stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available, as sketched below. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
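+
+A minimal sketch of the direct deflate route mentioned above, assuming `NewStatelessWriter` returns an `io.WriteCloser` as its documentation describes; the output destination and buffer size here are arbitrary placeholders:
+
+```
+package main
+
+import (
+	"bufio"
+	"log"
+	"os"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	// Replace os.Stdout with your output.
+	// Nothing is kept between Write calls, so write sizes affect output size.
+	sw := flate.NewStatelessWriter(os.Stdout)
+	defer sw.Close()
+
+	// As in the gzip example above, a bufio.Writer controls write sizes.
+	w := bufio.NewWriterSize(sw, 4096)
+	defer w.Flush()
+
+	if _, err := w.Write([]byte("hello, stateless deflate")); err != nil {
+		log.Fatal(err)
+	}
+}
+```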
+
+# Performance Update 2018
+
+It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression" setting, with the values in between giving something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible.
Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 and 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is clearly visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1).
So in this case you trade a 30% size increase for a 4 times speedup. + +For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). + +This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 00000000..ea5a692d --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. + if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 00000000..ea7324da --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. 
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides fast, near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and the compressibility of the material are the primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
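+
+As a concrete, hypothetical illustration of that tip, using the `Compress`/`Decompress` functions and `Scratch` object described above; treat it as a sketch rather than a benchmarked recommendation, and note that the input data and shift amount are made up for the example:
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	// Hypothetical input where every byte value is >= 64 (ASCII letters).
+	in := bytes.Repeat([]byte("AACCGGTT"), 128)
+
+	// Shift all symbols down by 64 so the highest byte value is smaller.
+	shifted := make([]byte, len(in))
+	for i, c := range in {
+		shifted[i] = c - 64
+	}
+
+	var s fse.Scratch
+	comp, err := fse.Compress(shifted, &s)
+	if err != nil {
+		// ErrIncompressible / ErrUseRLE are normal outcomes, see the table above.
+		log.Fatal(err)
+	}
+
+	// We still hold the compressed output, so detach it from the scratch
+	// before re-using the scratch for decompression.
+	s.Out = nil
+	dec, err := fse.Decompress(comp, &s)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Undo the shift to recover the original bytes.
+	for i := range dec {
+		dec[i] += 64
+	}
+	fmt.Println(bytes.Equal(dec, in)) // should print true
+}
+```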
+ +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000..f65eb390 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000..43e46361 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000..abade2d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
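+// It does not advance the offset; use advance for that.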
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000..6f341914 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
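+// Unlike encode, it uses addBits16ZeroNC, so it is safe when the transition emits zero bits.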
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
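+// It computes the symbol start positions, spreads the symbols across the table and derives the per-symbol transforms used during encoding.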
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
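+// It re-distributes the remaining probability mass across the symbols instead of correcting only the most frequent one.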
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
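+// The absolute values of the normalized counts must sum to 1<<actualTableLog; on failure the table is printed for debugging.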
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
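+// Existing slices are re-used when their capacity is already sufficient.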
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000..535cbadf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
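+// For the power-of-two table sizes used here the step is odd, so repeatedly adding it modulo the table size visits every position exactly once.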
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 00000000..aff94220
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 00000000..b3d26295
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 00000000..8b6e5c66
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring out-of-order (OoO) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step after compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows you to compress single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and possibly an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 KiB) |
+| `(error)` | An internal error occurred. |
+
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
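+
+For example, a minimal sketch of the compression side could look like the following (the sample input and the handling of each outcome are illustrative only, not prescriptive):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	// Placeholder input; any []byte block up to the maximum block size works.
+	input := []byte("aaaaabbbbbcccccdddddaaaaabbbbbcccccddddd")
+
+	var s huff0.Scratch
+	out, reused, err := huff0.Compress1X(input, &s)
+	switch err {
+	case nil:
+		fmt.Printf("compressed %d -> %d bytes (table reused: %v)\n", len(input), len(out), reused)
+	case huff0.ErrIncompressible:
+		fmt.Println("block is not compressible; store it uncompressed")
+	case huff0.ErrUseRLE:
+		fmt.Println("block is a single repeated byte; store it as RLE")
+	default:
+		log.Fatal(err)
+	}
+}
+```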
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows it to re-use previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) functions.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 00000000..504a7be9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,233 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000..ec71f7a3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,95 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000..4dcab8d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,44 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// remain will return the number of bytes remaining. 
+func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000..4d14542f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,730 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. 
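+	// A new table is always built from the histogram at this point; with ReusePolicyAllow it is compared against the previous table further down.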
+ err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. 
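+	// The 0-3 bytes that do not fit the 4-byte stride are encoded first, so the main loops below always consume 4 symbols per iteration.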
+ for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
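+// Reuse is only reported as possible when every symbol present in the input
+// also has an assigned code (nBits != 0) in s.prevTable.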
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
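+	// Concretely, huffNode0[i+1] aliases huffNode[i]; huffNode0[0] is the extra
+	// slot that holds the sentinel ("strong barrier") entry set further down.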
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
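+	// Symbols are first bucketed by highBit32(count+1) (roughly log2 of their
+	// count) and then insertion-sorted within their bucket, which yields the
+	// required decreasing-count order without a general-purpose sort.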
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000..42a237ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
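+// The serialized table starts with a single header byte: values >= 128 mean the
+// following bytes hold raw 4-bit weights for (header-127) symbols; otherwise the
+// header value gives the number of bytes of FSE-compressed weights that follow.
+// The last weight is not stored; it is implied by the requirement that the
+// weights sum to a power of two.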
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
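+	// Both the decode table (s.dt.single) and a matching encode table
+	// (s.prevTable) have now been rebuilt. A typical decode-side flow, sketched
+	// with placeholder names (block and dstCap are not defined by this package):
+	//
+	//	s, data, err := huff0.ReadTable(block, nil)
+	//	dst, err := s.Decoder().Decompress1X(make([]byte, 0, dstCap), data)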
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
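+				// off is a uint8 and wraps back to zero after 64 groups of four
+				// symbols, so reaching off == 0 above means the whole 256-byte buf
+				// has been filled and flushed to dst.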
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
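+			// Streams 0 and 1 are decoded together in this block; streams 2 and 3
+			// follow in the next one. Each pass writes four bytes per stream into
+			// buf, using the per-stream inputs split off above via the 6-byte jump
+			// table (three little-endian uint16 lengths; stream 3 gets the rest).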
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000..ba7e8e6b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
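+			// This scalar tail only processes what the assembly main loop above
+			// left unfinished: the final few bytes of each of the four streams.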
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000..8d2187a2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,846 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 + + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // 
out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 + + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, 
R14 + MOVQ 48(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 + + // exhausted = exhausted || 
(br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ 
(CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000..908c17de --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000..e8ad17ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + br byteReader + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. 
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000..3954c512 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000..e802579c --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000..4465fbe9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE similarity index 100% rename from vendor/github.com/golang/snappy/LICENSE rename to vendor/github.com/klauspost/compress/internal/snapref/LICENSE diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go similarity index 99% rename from vendor/github.com/golang/snappy/decode.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode.go index 23c6e26c..40796a49 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go similarity index 98% rename from vendor/github.com/golang/snappy/decode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/decode_other.go index 2f672be5..77395a6b 100644 --- a/vendor/github.com/golang/snappy/decode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!arm64 appengine !gc noasm - -package snappy +package snapref // decode writes the decoding of src to dst. 
It assumes that the varint-encoded // length of the decompressed bytes has already been read, and that len(dst) diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go similarity index 99% rename from vendor/github.com/golang/snappy/encode.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode.go index 7f236570..13c6040a 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package snappy +package snapref import ( "encoding/binary" diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go similarity index 98% rename from vendor/github.com/golang/snappy/encode_other.go rename to vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 296d7f0b..298c4f8e 100644 --- a/vendor/github.com/golang/snappy/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!arm64 appengine !gc noasm - -package snappy +package snapref func load32(b []byte, i int) uint32 { b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. @@ -20,6 +18,7 @@ func load64(b []byte, i int) uint64 { // emitLiteral writes a literal chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= len(lit) && len(lit) <= 65536 func emitLiteral(dst, lit []byte) int { @@ -44,6 +43,7 @@ func emitLiteral(dst, lit []byte) int { // emitCopy writes a copy chunk and returns the number of bytes written. // // It assumes that: +// // dst is long enough to hold the encoded bytes // 1 <= offset && offset <= 65535 // 4 <= length && length <= 65535 @@ -91,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int { // src[i:i+k-j] and src[j:k] have the same contents. // // It assumes that: +// // 0 <= i && i < j && j <= len(src) func extendMatch(src []byte, i, j int) int { for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { @@ -107,8 +108,9 @@ func hash(u, shift uint32) uint32 { // been written. // // It also assumes that: +// // len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. // The table element type is uint16, as s < sLimit and sLimit < len(src) diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go similarity index 96% rename from vendor/github.com/golang/snappy/snappy.go rename to vendor/github.com/klauspost/compress/internal/snapref/snappy.go index ece692ea..34d01f4a 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package snappy implements the Snappy compression format. It aims for very +// Package snapref implements the Snappy compression format. 
It aims for very // high speeds and reasonable compression. // // There are actually two Snappy formats: block and stream. They are related, @@ -17,7 +17,7 @@ // // The canonical, C++ implementation is at https://github.com/google/snappy and // it only implements the block format. -package snappy // import "github.com/golang/snappy" +package snapref import ( "hash/crc32" diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 00000000..2263853f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000..65b38abe --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. 
+ +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined +compression settings can be specified. + +#### Future Compatibility Guarantees + +This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. + +The goal will be to keep the default efficiency at the default zstd (level 3). +However the encoding should never be assumed to remain the same, +and you should not use hashes of compressed output for similarity checks. + +The Encoder can be assumed to produce the same output from the exact same code version. +However, the may be modes in the future that break this, +although they will not be enabled without an explicit option. + +This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. + +Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), +[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) +and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). + +#### Blocks + +For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. + +`EncodeAll` will encode all input in src and append it to dst. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. + +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. 
+// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. + +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. + +For decoding buffers, it could look something like this: + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a reader that caches decompressors. +// For this operation type we supply a nil Reader. +var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) + +// Decompress a buffer. We don't supply a destination buffer, +// so it will be allocated by the decoder. +func Decompress(src []byte) ([]byte, error) { + return decoder.DecodeAll(src, nil) +} +``` + +Both of these cases should provide the functionality needed. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + +It will only allow a certain number of concurrent operations to run. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. + +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. + +The dictionary will be used automatically for the data that specifies them. +A re-used Decoder will still contain the dictionaries registered. + +When registering multiple dictionaries with the same ID, the last one will be used. + +It is possible to use dictionaries when compressing data. + +To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used +and it will likely be used even if it doesn't improve compression. + +The used dictionary must be used to decompress the content. + +For any real gains, the dictionary should be built with similar data. +If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. +Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. +For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). + +For now there is a fixed startup performance penalty for compressing content with dictionaries. +This will likely be improved over time. Just be aware to test performance when implementing. + +### Allocation-less operation + +The decoder has been designed to operate without allocations after a warmup. + +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. 
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. 
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000..97299d49 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,140 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000..78b3c61b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,113 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. 
+// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000..f52d1aed --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,720 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. 
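To make the bitWriter convention above concrete, here is a small stand-alone sketch (not part of this file) of LSB-first packing: new bits are OR'ed in above the bits already held, and whole bytes are emitted lowest byte first, matching addBits16NC and flushAlign.

package main

import "fmt"

func main() {
	var container uint64
	var nBits uint8

	// add mirrors addBits16NC: each value occupies the next-higher bits.
	add := func(v uint64, bits uint8) {
		container |= v << nBits
		nBits += bits
	}
	add(0b101, 3)  // first value ends up in the three lowest bits
	add(0b0110, 4) // second value sits directly above it

	// flushAlign equivalent: emit (nBits+7)/8 bytes, low byte first.
	var out []byte
	for i := uint8(0); i < (nBits+7)>>3; i++ {
		out = append(out, byte(container>>(8*i)))
	}
	fmt.Printf("%08b\n", out) // [00110101]
}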
+ WindowSize uint64 + + err error + + // Check against this crc + checkCRC []byte + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if cap(b.dataStorage) < cSize { + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. 
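The 3-byte block header that reset parses above packs three fields little-endian: bit 0 is Last_Block, bits 1-2 are Block_Type, and the remaining 21 bits are the size. A stand-alone sketch of just that unpacking (the helper name is illustrative, not part of the package):

package main

import "fmt"

// parseBlockHeader mirrors the header split done in blockDec.reset.
func parseBlockHeader(tmp [3]byte) (last bool, typ uint8, size int) {
	bh := uint32(tmp[0]) | uint32(tmp[1])<<8 | uint32(tmp[2])<<16
	last = bh&1 != 0       // bit 0: Last_Block
	typ = uint8(bh>>1) & 3 // bits 1-2: raw, RLE, compressed or reserved
	size = int(bh >> 3)    // bits 3-23: Block_Size / RLE size
	return
}

func main() {
	// 0x05 0x00 0x00 -> last=true, type=2 (compressed), size=0
	fmt.Println(parseBlockHeader([3]byte{0x05, 0x00, 0x00}))
}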
+func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + 
} + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000..12e8f6f0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
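The sequence-count header read in prepareSequences above uses one, two or three bytes depending on the first byte. A stand-alone sketch of that rule (the helper name is illustrative, not part of the package):

package main

import "fmt"

// readNumSequences mirrors the Number_of_Sequences cases in prepareSequences.
func readNumSequences(in []byte) (nSeqs int, rest []byte) {
	switch {
	case in[0] < 128: // single byte
		return int(in[0]), in[1:]
	case in[0] < 255: // two bytes
		return int(in[0]-128)<<8 | int(in[1]), in[2:]
	default: // first byte is 255: three bytes
		return 0x7f00 + int(in[1]) + int(in[2])<<8, in[3:]
	}
}

func main() {
	n, _ := readNumSequences([]byte{130, 7})
	fmt.Println(n) // (130-128)<<8 | 7 = 519
}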
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... 
+ // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... 
+ if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. 
+		outBits := ofB.outBits & 31
+		extraBits := uint64(s.offset & bitMask32[outBits])
+		extraBitsN := outBits
+
+		mlB := mlTT[s.mlCode]
+		//ml.encode(mlB)
+		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+		wr.addBits16NC(ml.state, uint8(nbBitsOut))
+		ml.state = ml.stateTable[dstState]
+
+		outBits = mlB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		llB := llTT[s.llCode]
+		//ll.encode(llB)
+		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+		wr.addBits16NC(ll.state, uint8(nbBitsOut))
+		ll.state = ll.stateTable[dstState]
+
+		outBits = llB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		wr.flush32()
+		wr.addBits64NC(extraBits, extraBitsN)
+
+		if debugSequences {
+			println("Encoded seq", seq, s)
+		}
+
+		seq--
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debugEncoder {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()
+	ofH := b.coders.ofEnc.Histogram()
+	mlH := b.coders.mlEnc.Histogram()
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i := range b.sequences {
+		seq := &b.sequences[i]
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 00000000..01a01e48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000..176788f2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000..0e59a242 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000..5022e71c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,230 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000..78c10755 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,950 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. 
+// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. 
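+ // Reset both the sync and async decoder state before choosing a decode
+ // path for the new stream below.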
+ d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
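+ // Receiving from d.decoders is also what enforces the decoder's
+ // concurrency limit for stateless decodes.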
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(&dict) + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
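+ // (The output channel was closed without delivering a final block.)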
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if !d.o.ignoreChecksum && len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = 
nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
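+ // This stage applies decoded sequences to the shared history, enforces the
+ // frame content size, and sends finished blocks to the output channel in order.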
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > 
d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err + } + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000..f42448e6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,149 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
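+//
+// A minimal sketch of applying this option (illustrative only; compressedInput
+// is a hypothetical []byte, not part of this package):
+//
+//	dec, err := NewReader(nil, WithDecoderConcurrency(1))
+//	if err != nil {
+//		return err
+//	}
+//	defer dec.Close()
+//	out, err := dec.DecodeAll(compressedInput, nil)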
+func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// If several dictionaries with the same ID is provided the last one will be used. +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, *d) + } + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. 
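+// The checksum bytes are still consumed from the stream so framing stays
+// intact; they are simply not verified.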
+func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000..a36ae83e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,122 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// DictContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) DictContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 00000000..15ae8ee8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,188 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. 
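+// The buffer is only reallocated when its current capacity is too small;
+// it is never shrunk here.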
+func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + a := src[s:] + b := src[t:] + b = b[:len(a)] + end := int32((len(a) >> 3) << 3) + for i := int32(0); i < end; i += 8 { + if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { + return i + int32(bits.TrailingZeros64(diff)>>3) + } + } + + a = a[end:] + b = b[end:] + for i := range a { + if a[i] != b[i] { + return int32(i) + end + } + } + return int32(len(a)) + end +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 00000000..dbbb88d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,559 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = prevEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. 
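+ // I.e. bitsPerByte is a fixed-point value with 10 fractional bits,
+ // so 1024 corresponds to one bit per byte.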
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + bestOf := func(a, b match) match { + if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... 
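+ // The step grows with the current literal run ((s-nextEmit)>>(kSearchStrength-1)),
+ // so incompressible regions are skipped progressively faster.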
+ if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + // Long at s+1, s+2 + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + if false { + // Short at s+3. + // Too often worse... + best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { + bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) + if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { + bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + } + best = bestEnd + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. 
Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. 
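+// Here it only makes sure the history buffer can hold the input and then
+// uses the regular Encode path.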
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000..d70e3fd3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1246 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
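+ // For illustration, one iteration of the indexing loop below, written out
+ // with this file's helpers (load6432, hashLen): the skipped position goes
+ // into the chained long table and the following position into the short
+ // table, so later searches can still start inside the repeated region.
+ //
+ //	cv0 := load6432(src, index0)
+ //	h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ //	e.longTable[h0] = prevEntry{offset: index0 + e.cur, prev: e.longTable[h0].offset}
+ //	h1 := hashLen(cv0>>8, betterShortTableBits, betterShortLen)
+ //	e.table[h1] = tableEntry{offset: index0 + e.cur + 1, val: uint32(cv0 >> 8)}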
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
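+ // For illustration: stored offsets are absolute (position + e.cur), so the
+ // rebase below keeps an entry only if it is still within e.maxMatchOff of the
+ // end of history. A surviving entry v becomes v - e.cur + e.maxMatchOff, the
+ // same position expressed relative to the new e.cur (e.maxMatchOff); anything
+ // older is zeroed and can never match again.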
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
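+ // For illustration: this branch restores only the shards that were
+ // overwritten while encoding (tracked via markLongShardDirty); the
+ // full-table copy above is taken instead once more than 2/3 of the shards
+ // are dirty, where a single bulk copy is cheaper than many small ones.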
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000..1f4a9a24 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
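+ // For illustration: a hash collision here is harmless for correctness.
+ // The first four bytes are still verified against candidateL.val, and the
+ // emitted length is recomputed from the actual bytes by e.matchlen further
+ // down, so a mistaken "long" candidate can only cost a little compression
+ // ratio, never an invalid sequence.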
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
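+ // For illustration: after a block this large, most shards have typically
+ // been written anyway, so marking everything dirty lets the next Reset fall
+ // back to one bulk copy from the dictionary tables instead of per-shard
+ // bookkeeping.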
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000..181edc02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,900 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000..7aaaedb2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,641 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
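// Illustrative sketch of the two usage modes the Encoder comment above describes:
// streaming through the io.WriteCloser-style interface, and buffer-to-buffer
// encoding via EncodeAll. The caller package, inputs and error handling are
// assumptions for the example, not part of this file.
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming: compress stdin to stdout.
	enc, err := zstd.NewWriter(os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		log.Fatal(err)
	}
	// Close flushes buffered data and writes the final block plus the optional CRC.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// Buffer-to-buffer: EncodeAll appends one complete frame to dst.
	// A nil writer is fine when the encoder is only used this way.
	small, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer small.Close()
	frame := small.EncodeAll([]byte("hello zstd"), nil)
	log.Printf("compressed to %d bytes", len(frame))
}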
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. 
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. 
+// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000..a7c5e1aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,317 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: true, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. 
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. 
+ if n == 1 { + o.pad = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. 
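// Illustrative sketch of composing the encoder options defined in this file
// when creating an encoder. The concrete values (zstd level 7, 1 MiB window,
// 4 KiB padding) are assumptions chosen for the example.
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map an external zstd level to the nearest EncoderLevel.
	level := zstd.EncoderLevelFromZstd(7) // levels 6-9 map to SpeedBetterCompression

	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(level),
		zstd.WithWindowSize(1<<20),    // power of two in [MinWindowSize, MaxWindowSize]
		zstd.WithEncoderPadding(4096), // pad output to a multiple of 4 KiB via a skippable frame
		zstd.WithEncoderCRC(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	out := enc.EncodeAll([]byte("example payload"), nil)
	log.Printf("frame size: %d bytes", len(out))
}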
+func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// The encoder *may* choose to use no dictionary instead for certain payloads. +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 00000000..b6c50541 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
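// Decode-side sketch paired with the frame decoder below: a small round trip
// that produces a frame with the encoder and decodes it with DecodeAll.
// WithDecoderMaxMemory is assumed to bound the window/decoded-size checks
// (ErrWindowSizeExceeded, ErrDecoderSizeExceeded) performed in this file.
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	frame := enc.EncodeAll([]byte("round trip"), nil)
	enc.Close()

	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	plain, err := dec.DecodeAll(frame, nil)
	if err != nil {
		log.Fatal(err) // e.g. ErrMagicMismatch or ErrCRCMismatch on corrupt input
	}
	log.Printf("decoded: %q", plain)
}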
+ +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) + } + + if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. 
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(signature[:], frameMagic) { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", frameMagic) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. 
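// Worked example for the header fields parsed above (values are assumptions
// for illustration): a Window_Descriptor byte of 0x0B has exponent 1 and
// mantissa 3, so windowLog = 10+1 = 11, windowBase = 1<<11 = 2048,
// windowAdd = (2048/8)*3 = 768, and WindowSize = 2816. For a 2-byte
// Frame_Content_Size field the stored value is the content size minus 256,
// which is why 256 is added back in the fcsSize == 2 case.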
+ d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or very small window size. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + // Alloc with one additional block + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + + // We can overwrite upper tmp now + want, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if d.o.ignoreChecksum { + return nil + } + + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + if !bytes.Equal(tmp[:], want) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debugDecoder { + println("CRC ok", tmp[:]) + } + return nil +} + +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + } + + return nil +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. 
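+ // crcStart marks where this frame's output begins in dst; only bytes produced by this frame are hashed for the checksum and counted against the size limits below.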
+ crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000..4ef7f5a3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) 
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are nto stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst, nil +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 00000000..2f8860a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<<s.actualTableLog { + return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog) + } + b.advance((bitCount + 7) >> 3) + return s.buildDtable() +} + +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode.
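+ // In RLE mode every state decodes to the same symbol and no table bits are read, so a single-entry table with actualTableLog == 0 is enough.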
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 00000000..d04a829b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 00000000..bcde3986 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 00000000..332e51fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
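+ // Pure Go fallback used when the amd64 assembly version is unavailable. It follows the reference layout: low-probability (-1) symbols are placed at the top of the table, the remaining symbols are spread with a fixed step, and nbBits/newState are then filled in for every cell.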
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000..ab26326a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
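+ // Normalization rescales the raw counts so they sum to exactly 1<<actualTableLog; symbols at or below the low threshold get the special value -1, a "less than one" probability that still occupies a single table slot.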
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) + } + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { + if s.useRLE { + return append(out, s.rleVal), nil + } + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + // maximum header size plus 2 extra bytes for final output if bitCount == 0. + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
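+ // FSE encoding runs over the symbols in reverse order: init derives the starting state from the transform of the first symbol it is given, and flush writes the final state with tableLog bits, which is where the decoder starts reading.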
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000..474cb77d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000..5d73c21e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000..09164856 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. 
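+ // Shift the existing history down so that, after appending b, exactly windowSize bytes remain.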
+ discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000..69aa3bb5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. 
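+
+A minimal usage sketch (shown here against the upstream `github.com/cespare/xxhash`
+import path, since this vendored copy is internal to zstd); one-shot and streaming
+hashing of the same bytes produce the same value:
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash"
+)
+
+func main() {
+	// One-shot hash of a byte slice.
+	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))
+
+	// Streaming the same bytes through a Digest.
+	d := xxhash.New()
+	d.WriteString("hello, ")
+	d.WriteString("world")
+	fmt.Printf("%016x\n", d.Sum64())
+}
+```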
+ +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000..2c112a0a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,237 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. 
+ copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..cea17856 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,216 @@ +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Register allocation: +// AX h +// SI pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// DI prime4v + +// round reads from and advances the buffer pointer in SI. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ DI, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), DI + + // Load slice. + MOVQ b_base+0(FP), SI + MOVQ b_len+8(FP), DX + LEAQ (SI)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until SI > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. + ADDQ $24, BX + + CMPQ SI, BX + JG fourByte + +wordLoop: + // Calculate k1. 
+ MOVQ (SI), R8 + ADDQ $8, SI + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ DI, AX + + CMPQ SI, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ SI, BX + JG singles + + MOVL (SI), R8 + ADDQ $4, SI + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ SI, BX + JGE finalize + +singlesLoop: + MOVBQZX (SI), R12 + ADDQ $1, SI + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ SI, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000..4d64a17d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,186 @@ +// +build gc,!purego,!noasm + +#include "textflag.h" + +// Register allocation. +#define digest R1 +#define h R2 // Return value. +#define p R3 // Input pointer. +#define len R4 +#define nblocks R5 // len / 32. +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc \ + +// x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x \ + +#define mergeRound(x) \ + round0(x) \ + EOR x, h \ + MADD h, prime4, prime1, h \ + +// Update v[1-4] with 32-byte blocks. Assumes len >= 32. +#define blocksLoop() \ + LSR $5, len, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 32(p), (x1, x2) \ + round(v1, x1) \ + LDP -16(p), (x3, x4) \ + round(v2, x2) \ + SUB $1, nblocks \ + round(v3, x3) \ + round(v4, x4) \ + CBNZ nblocks, loop \ + +// The primes are repeated here to ensure that they're stored +// in a contiguous array, so we can load them with LDP. 
+DATA primes<> +0(SB)/8, $11400714785074694791 +DATA primes<> +8(SB)/8, $14029467366897019727 +DATA primes<>+16(SB)/8, $1609587929392839161 +DATA primes<>+24(SB)/8, $9650029242287828579 +DATA primes<>+32(SB)/8, $2870177450012600261 +GLOBL primes<>(SB), NOPTR+RODATA, $40 + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 + LDP b_base+0(FP), (p, len) + + LDP primes<> +0(SB), (prime1, prime2) + LDP primes<>+16(SB), (prime3, prime4) + MOVD primes<>+32(SB), prime5 + + CMP $32, len + CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } + BLO afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blocksLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(v1) + mergeRound(v2) + mergeRound(v3) + mergeRound(v4) + +afterLoop: + ADD len, h + + TBZ $4, len, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, len, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, len, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, len, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h + MUL prime1, h + +try1: + TBZ $0, len, end + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h + MUL prime1, h + +end: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +// +// Assumes len(b) >= 32. +TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 + LDP primes<>(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, len) + + blocksLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, len + MOVD len, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 00000000..1a1fac9c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000..209cb4a9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,77 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000..6f3b0cb1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000..f833d154 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,509 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. 
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 sequence decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	dict         []byte
+	literals     []byte
+	out          []byte
+	nSeqs        int
+	br           *bitReader
+	seqSize      int
+	windowSize   int
+	maxBits      uint8
+	maxSyncLen   uint64
+}
+
+// execute will execute the decoded sequences with the provided history.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+		addBytes := s.seqSize + len(s.out)
+		s.out = append(s.out, make([]byte, addBytes)...)
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	for _, seq := range seqs {
+		// Add literals
+		copy(out[t:], s.literals[:seq.ll])
+		t += seq.ll
+		s.literals = s.literals[seq.ll:]
+
+		// Copy from dictionary...
+		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+			}
+			end := dictO + seq.ml
+			if end > len(s.dict) {
+				n := len(s.dict) - dictO
+				copy(out[t:], s.dict[dictO:])
+				t += n
+				seq.ml -= n
+			} else {
+				copy(out[t:], s.dict[dictO:end])
+				t += end - dictO
+				continue
+			}
+		}
+
+		// Copy from history.
+		if v := seq.mo - t; v > 0 {
+			// v is the start position in history from end.
+			start := len(hist) - v
+			if seq.ml > v {
+				// Some goes into current block.
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+		// We must be in current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+				continue
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at the time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+	supported, err := s.decodeSyncSimple(hist)
+	if supported {
+		return err
+	}
+
+	br := s.br
+	seqs := s.nSeqs
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + if size-startSize == 424242 { + panic("here") + } + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) + s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. 
+ dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. 
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000..191384ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. 
+ const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000..52e5703c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4099 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset 
and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + 
MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB 
copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000..ac2a80d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
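+					// With literals_length == 0 the mo++ below applies that shift;
+					// the lookup that follows then resolves mo == 0 to prevOffset[0],
+					// mo == 1 to prevOffset[1], mo == 2 to prevOffset[2], and
+					// mo == 3 to prevOffset[0] - 1.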
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
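+				// Only v bytes are still available in hist; copy those first and let
+				// the in-buffer copy further below supply the remaining seq.ml bytes.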
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000..8014174a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
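+// Lengths 0-63 map directly through llCodeTable; longer lengths are coded as
+// highBit(litLength) + llDeltaCode.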
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000..9e1baad7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,435 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
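+			// The chunk body is a 4-byte masked CRC of the uncompressed data
+			// followed by the Snappy-compressed payload; the CRC is not verified
+			// here (see the commented-out checksum read below).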
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 00000000..29c15c8c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000..3eb3f1c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,152 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. 
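+// Bytes are compared eight at a time; the trailing-zero count of the first
+// non-zero XOR locates the first mismatching byte.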
+func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 39bbcf00..d569c0c9 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,5 +1,5 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine -// +build darwin freebsd openbsd netbsd dragonfly +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine package isatty diff --git a/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/moby/patternmatcher/LICENSE similarity index 93% rename from vendor/github.com/ishidawataru/sctp/LICENSE rename to vendor/github.com/moby/patternmatcher/LICENSE index 8dada3ed..6d8d58fb 100644 --- a/vendor/github.com/ishidawataru/sctp/LICENSE +++ b/vendor/github.com/moby/patternmatcher/LICENSE @@ -1,6 +1,7 @@ + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -175,24 +176,13 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} + Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/moby/patternmatcher/NOTICE b/vendor/github.com/moby/patternmatcher/NOTICE new file mode 100644 index 00000000..e5154640 --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. 
+It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/patternmatcher/patternmatcher.go b/vendor/github.com/moby/patternmatcher/patternmatcher.go new file mode 100644 index 00000000..37a1a59a --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/patternmatcher.go @@ -0,0 +1,474 @@ +package patternmatcher + +import ( + "errors" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + "unicode/utf8" +) + +// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. +var escapeBytes [8]byte + +// shouldEscape reports whether a rune should be escaped as part of the regex. +// +// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. +// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator +// on Windows. +// +// Adapted from regexp::QuoteMeta in go stdlib. +// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 +func shouldEscape(b rune) bool { + return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 +} + +func init() { + for _, b := range []byte(`.+()|{}$`) { + escapeBytes[b%8] |= 1 << (b / 8) + } +} + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// New creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func New(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +// +// Deprecated: This implementation is buggy (it only checks a single parent dir +// against the pattern) and will be removed soon. Use either +// MatchesOrParentMatches or MatchesUsingParentResults instead. 
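+//
+// An illustrative sketch (assuming Unix path separators):
+//
+//	pm, _ := New([]string{"docs/**", "!docs/secret.txt"})
+//	ok, _ := pm.MatchesOrParentMatches("docs/readme.md")  // true
+//	ok, _ = pm.MatchesOrParentMatches("docs/secret.txt")  // false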
+func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesOrParentMatches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesUsingParentResult returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. The functionality is +// the same as Matches, but as an optimization, the caller keeps track of +// whether the parent directory matched. +// +// The "file" argument should be a slash-delimited path. +// +// MatchesUsingParentResult is not safe to call concurrently. +// +// Deprecated: this function does behave correctly in some cases (see +// https://github.com/docker/buildx/issues/850). +// +// Use MatchesUsingParentResults instead. +func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { + matched := parentMatched + file = filepath.FromSlash(file) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if match { + matched = !pattern.exclusion + } + } + return matched, nil +} + +// MatchInfo tracks information about parent dir matches while traversing a +// filesystem. +type MatchInfo struct { + parentMatched []bool +} + +// MatchesUsingParentResults returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
The functionality is +// the same as Matches, but as an optimization, the caller passes in +// intermediate results from matching the parent directory. +// +// The "file" argument should be a slash-delimited path. +// +// MatchesUsingParentResults is not safe to call concurrently. +func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { + parentMatched := parentMatchInfo.parentMatched + if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { + return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") + } + + file = filepath.FromSlash(file) + matched := false + + matchInfo := MatchInfo{ + parentMatched: make([]bool, len(pm.patterns)), + } + for i, pattern := range pm.patterns { + match := false + // If the parent matched this pattern, we don't need to recheck. + if len(parentMatched) != 0 { + match = parentMatched[i] + } + + if !match { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + var err error + match, err = pattern.match(file) + if err != nil { + return false, matchInfo, err + } + + // If the zero value of MatchInfo was passed in, we don't have + // any information about the parent dir's match results, and we + // apply the same logic as MatchesOrParentMatches. + if !match && len(parentMatched) == 0 { + if parentPath := filepath.Dir(file); parentPath != "." { + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + } + } + matchInfo.parentMatched[i] = match + + if match { + matched = !pattern.exclusion + } + } + return matched, matchInfo, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. 
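+// A pattern is compiled lazily on first use: simple patterns are answered with
+// exact, prefix or suffix string comparisons, and only patterns that need it
+// fall back to a compiled regexp.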
+type Pattern struct { + matchType matchType + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +type matchType int + +const ( + unknownMatch matchType = iota + exactMatch + prefixMatch + suffixMatch + regexpMatch +) + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + if p.matchType == unknownMatch { + if err := p.compile(string(os.PathSeparator)); err != nil { + return false, filepath.ErrBadPattern + } + } + + switch p.matchType { + case exactMatch: + return path == p.cleanedPattern, nil + case prefixMatch: + // strip trailing ** + return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil + case suffixMatch: + // strip leading ** + suffix := p.cleanedPattern[2:] + if strings.HasSuffix(path, suffix) { + return true, nil + } + // **/foo matches "foo" + return suffix[0] == os.PathSeparator && path == suffix[1:], nil + case regexpMatch: + return p.regexp.MatchString(path), nil + } + + return false, nil +} + +func (p *Pattern) compile(sl string) error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + escSL := sl + if sl == `\` { + escSL += `\` + } + + p.matchType = exactMatch + for i := 0; scan.Peek() != scanner.EOF; i++ { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + if p.matchType == exactMatch { + p.matchType = prefixMatch + } else { + regStr += ".*" + p.matchType = regexpMatch + } + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + p.matchType = regexpMatch + } + + if i == 0 { + p.matchType = suffixMatch + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + p.matchType = regexpMatch + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + p.matchType = regexpMatch + } else if shouldEscape(ch) { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + p.matchType = regexpMatch + } else { + regStr += `\` + } + } else if ch == '[' || ch == ']' { + regStr += string(ch) + p.matchType = regexpMatch + } else { + regStr += string(ch) + } + } + + if p.matchType != regexpMatch { + return nil + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + p.matchType = regexpMatch + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
+// +// This implementation is buggy (it only checks a single parent dir against the +// pattern) and will be removed soon. Use MatchesOrParentMatches instead. +func Matches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// MatchesOrParentMatches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func MatchesOrParentMatches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.MatchesOrParentMatches(file) +} diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/github.com/moby/sys/sequential/LICENSE similarity index 99% rename from vendor/go.opencensus.io/LICENSE rename to vendor/github.com/moby/sys/sequential/LICENSE index 7a4a3ea2..d6456956 100644 --- a/vendor/go.opencensus.io/LICENSE +++ b/vendor/github.com/moby/sys/sequential/LICENSE @@ -199,4 +199,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/vendor/github.com/moby/sys/sequential/doc.go b/vendor/github.com/moby/sys/sequential/doc.go new file mode 100644 index 00000000..af281750 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/doc.go @@ -0,0 +1,15 @@ +// Package sequential provides a set of functions for managing sequential +// files on Windows. +// +// The origin of these functions are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. +// +// For non-Windows platforms, the package provides wrappers for the equivalents +// in the os packages. They are passthrough on Unix platforms, and only relevant +// on Windows. +package sequential diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go new file mode 100644 index 00000000..a3c7340e --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -0,0 +1,45 @@ +//go:build !windows +// +build !windows + +package sequential + +import "os" + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return os.Create(name) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. 
+func Open(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// CreateTemp creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + return os.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go new file mode 100644 index 00000000..3f7f0d83 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -0,0 +1,161 @@ +package sequential + +import ( + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. 
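+// The perm argument is ignored; the file is opened with
+// FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL.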
+func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, err := openFileSequential(name, flag, 0) + if err == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: err} +} + +func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for CreateTemp +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// CreateTemp is a copy of os.CreateTemp, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore deleted file mode 100644 index 5e987350..00000000 --- a/vendor/github.com/pierrec/lz4/.gitignore +++ /dev/null @@ -1,34 +0,0 @@ -# Created by https://www.gitignore.io/api/macos - -### macOS ### -*.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -# End of https://www.gitignore.io/api/macos - -cmd/*/*exe -.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml deleted file mode 100644 index fd6c6db7..00000000 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -env: - - GO111MODULE=off - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - master - -matrix: - fast_finish: true - allow_failures: - - go: master - -sudo: false - -script: - - go test -v -cpu=2 - - go test -v -cpu=2 -race - - go test -v -cpu=2 -tags noasm - - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE deleted file mode 100644 index bd899d83..00000000 --- a/vendor/github.com/pierrec/lz4/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md deleted file mode 100644 index 4ee388e8..00000000 --- a/vendor/github.com/pierrec/lz4/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# lz4 : LZ4 compression in pure Go - -[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) -[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) -[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) -[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) - -## Overview - -This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. -The implementation is based on the reference C [one](https://github.com/lz4/lz4). - -## Install - -Assuming you have the go toolchain installed: - -``` -go get github.com/pierrec/lz4 -``` - -There is a command line interface tool to compress and decompress LZ4 files. - -``` -go install github.com/pierrec/lz4/cmd/lz4c -``` - -Usage - -``` -Usage of lz4c: - -version - print the program version - -Subcommands: -Compress the given files or from stdin to stdout. -compress [arguments] [ ...] - -bc - enable block checksum - -l int - compression level (0=fastest) - -sc - disable stream checksum - -size string - block max size [64K,256K,1M,4M] (default "4M") - -Uncompress the given files or from stdin to stdout. -uncompress [arguments] [ ...] - -``` - - -## Example - -``` -// Compress and uncompress an input string. -s := "hello world" -r := strings.NewReader(s) - -// The pipe will uncompress the data from the writer. -pr, pw := io.Pipe() -zw := lz4.NewWriter(pw) -zr := lz4.NewReader(pr) - -go func() { - // Compress the input string. - _, _ = io.Copy(zw, r) - _ = zw.Close() // Make sure the writer is closed - _ = pw.Close() // Terminate the pipe -}() - -_, _ = io.Copy(os.Stdout, zr) - -// Output: -// hello world -``` - -## Contributing - -Contributions are very welcome for bug fixing, performance improvements...! - -- Open an issue with a proper description -- Send a pull request with appropriate test case(s) - -## Contributors - -Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! - -Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. - -Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. 
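// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): a minimal
// in-memory round trip through the streaming API described in the README
// above — lz4.NewWriter and lz4.NewReader wrap an io.Writer / io.Reader with
// the LZ4 frame format. The input string is arbitrary.
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	var compressed bytes.Buffer

	// Compress: everything written to zw ends up LZ4-framed in `compressed`.
	zw := lz4.NewWriter(&compressed)
	if _, err := io.Copy(zw, strings.NewReader("hello world")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil { // flush the last block and the frame footer
		panic(err)
	}

	// Decompress: zr transparently decodes the frame produced above.
	zr := lz4.NewReader(&compressed)
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello world
}
// ---------------------------------------------------------------------------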
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go deleted file mode 100644 index 664d9be5..00000000 --- a/vendor/github.com/pierrec/lz4/block.go +++ /dev/null @@ -1,413 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "math/bits" - "sync" -) - -// blockHash hashes the lower 6 bytes into a value < htSize. -func blockHash(x uint64) uint32 { - const prime6bytes = 227718039650203 - return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) -} - -// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. -func CompressBlockBound(n int) int { - return n + n/255 + 16 -} - -// UncompressBlock uncompresses the source buffer into the destination one, -// and returns the uncompressed size. -// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte) (int, error) { - if len(src) == 0 { - return 0, nil - } - if di := decodeBlock(dst, src); di >= 0 { - return di, nil - } - return 0, ErrInvalidSourceShortBuffer -} - -// CompressBlock compresses the source buffer into the destination one. -// This is the fast version of LZ4 compression and also the default one. -// -// The argument hashTable is scratch space for a hash table used by the -// compressor. If provided, it should have length at least 1<<16. If it is -// shorter (or nil), CompressBlock allocates its own hash table. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. -// -// An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { - defer recoverBlock(&err) - - // Return 0, nil only if the destination buffer size is < CompressBlockBound. - isNotCompressible := len(dst) < CompressBlockBound(len(src)) - - // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compression. - // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) - const adaptSkipLog = 7 - if len(hashTable) < htSize { - htIface := htPool.Get() - defer htPool.Put(htIface) - hashTable = (*(htIface).(*[htSize]int))[:] - } - // Prove to the compiler the table has at least htSize elements. - // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. - hashTable = hashTable[:htSize] - - // si: Current position of the search. - // anchor: Position of the current literals. - var si, di, anchor int - sn := len(src) - mfLimit - if sn <= 0 { - goto lastLiterals - } - - // Fast scan strategy: the hash table only stores the last 4 bytes sequences. - for si < sn { - // Hash the next 6 bytes (sequence)... - match := binary.LittleEndian.Uint64(src[si:]) - h := blockHash(match) - h2 := blockHash(match >> 8) - - // We check a match at s, s+1 and s+2 and pick the first one we get. - // Checking 3 only requires us to load the source one. - ref := hashTable[h] - ref2 := hashTable[h2] - hashTable[h] = si - hashTable[h2] = si + 1 - offset := si - ref - - // If offset <= 0 we got an old entry in the hash table. - if offset <= 0 || offset >= winSize || // Out of window. - uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. - // No match. 
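// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): the
// block-level API documented above. A scratch hash table of at least 1<<16
// ints can be passed in to avoid an allocation per call; the input data below
// is arbitrary.
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	data := bytes.Repeat([]byte("lz4 block example "), 100)

	// Destination sized with CompressBlockBound so the call cannot run short.
	buf := make([]byte, lz4.CompressBlockBound(len(data)))
	ht := make([]int, 1<<16) // reusable scratch space for the compressor

	n, err := lz4.CompressBlock(data, buf, ht)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d bytes into %d bytes\n", len(data), n)

	// Decompress into a buffer large enough for the original data.
	out := make([]byte, len(data))
	m, err := lz4.UncompressBlock(buf[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(data, out[:m])) // true
}
// ---------------------------------------------------------------------------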
Start calculating another hash. - // The processor can usually do this out-of-order. - h = blockHash(match >> 16) - ref = hashTable[h] - - // Check the second match at si+1 - si += 1 - offset = si - ref2 - - if offset <= 0 || offset >= winSize || - uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { - // No match. Check the third match at si+2 - si += 1 - offset = si - ref - hashTable[h] = si - - if offset <= 0 || offset >= winSize || - uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { - // Skip one extra byte (at si+3) before we check 3 matches again. - si += 2 + (si-anchor)>>adaptSkipLog - continue - } - } - } - - // Match found. - lLen := si - anchor // Literal length. - // We already matched 4 bytes. - mLen := 4 - - // Extend backwards if we can, reducing literals. - tOff := si - offset - 1 - for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { - si-- - tOff-- - lLen-- - mLen++ - } - - // Add the match length, so we continue search at the end. - // Use mLen to store the offset base. - si, mLen = si+mLen, si+minMatch - - // Find the longest match by looking by batches of 8 bytes. - for si+8 < sn { - x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) - if x == 0 { - si += 8 - } else { - // Stop is first non-zero byte. - si += bits.TrailingZeros64(x) >> 3 - break - } - } - - mLen = si - mLen - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // Encode literals length. - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(l) - } - di++ - - // Literals. - copy(dst[di:di+lLen], src[anchor:anchor+lLen]) - di += lLen + 2 - anchor = si - - // Encode offset. - _ = dst[di] // Bound check elimination. - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2. - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(mLen) - di++ - } - // Check if we can load next values. - if si >= sn { - break - } - // Hash match end-2 - h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) - hashTable[h] = si - 2 - } - -lastLiterals: - if isNotCompressible && anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if isNotCompressible && di >= anchor { - // Incompressible. - return 0, nil - } - di += copy(dst[di:di+len(src)-anchor], src[anchor:]) - return di, nil -} - -// Pool of hash tables for CompressBlock. -var htPool = sync.Pool{ - New: func() interface{} { - return new([htSize]int) - }, -} - -// blockHash hashes 4 bytes into a value < winSize. -func blockHashHC(x uint32) uint32 { - const hasher uint32 = 2654435761 // Knuth multiplicative hash. - return x * hasher >> (32 - winSizeLog) -} - -// CompressBlockHC compresses the source buffer src into the destination dst -// with max search depth (use 0 or negative value for no max). -// -// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. 
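// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff):
// CompressBlockHC, documented above, trades speed for ratio by searching a
// chain of previous positions up to `depth` entries deep; 0 or a negative
// value means no limit, i.e. the full 64Kb window.
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	data := bytes.Repeat([]byte("high compression example "), 200)
	buf := make([]byte, lz4.CompressBlockBound(len(data)))

	// depth 0 -> search the whole window; a small positive depth is faster.
	n, err := lz4.CompressBlockHC(data, buf, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("HC: %d -> %d bytes\n", len(data), n)
}
// ---------------------------------------------------------------------------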
-// -// An error is returned if the destination buffer is too small. -func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { - defer recoverBlock(&err) - - // Return 0, nil only if the destination buffer size is < CompressBlockBound. - isNotCompressible := len(dst) < CompressBlockBound(len(src)) - - // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compression. - // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) - const adaptSkipLog = 7 - - var si, di, anchor int - - // hashTable: stores the last position found for a given hash - // chainTable: stores previous positions for a given hash - var hashTable, chainTable [winSize]int - - if depth <= 0 { - depth = winSize - } - - sn := len(src) - mfLimit - if sn <= 0 { - goto lastLiterals - } - - for si < sn { - // Hash the next 4 bytes (sequence). - match := binary.LittleEndian.Uint32(src[si:]) - h := blockHashHC(match) - - // Follow the chain until out of window and give the longest match. - mLen := 0 - offset := 0 - for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { - // The first (mLen==0) or next byte (mLen>=minMatch) at current match length - // must match to improve on the match length. - if src[next+mLen] != src[si+mLen] { - continue - } - ml := 0 - // Compare the current position with a previous with the same hash. - for ml < sn-si { - x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) - if x == 0 { - ml += 8 - } else { - // Stop is first non-zero byte. - ml += bits.TrailingZeros64(x) >> 3 - break - } - } - if ml < minMatch || ml <= mLen { - // Match too small (>adaptSkipLog - continue - } - - // Match found. - // Update hash/chain tables with overlapping bytes: - // si already hashed, add everything from si+1 up to the match length. - winStart := si + 1 - if ws := si + mLen - winSize; ws > winStart { - winStart = ws - } - for si, ml := winStart, si+mLen; si < ml; { - match >>= 8 - match |= uint32(src[si+3]) << 24 - h := blockHashHC(match) - chainTable[si&winMask] = hashTable[h] - hashTable[h] = si - si++ - } - - lLen := si - anchor - si += mLen - mLen -= minMatch // Match length does not include minMatch. - - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // Encode literals length. - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(l) - } - di++ - - // Literals. - copy(dst[di:di+lLen], src[anchor:anchor+lLen]) - di += lLen - anchor = si - - // Encode offset. - di += 2 - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2. - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(mLen) - di++ - } - } - - if isNotCompressible && anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. -lastLiterals: - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if isNotCompressible && di >= anchor { - // Incompressible. 
- return 0, nil - } - di += copy(dst[di:di+len(src)-anchor], src[anchor:]) - return di, nil -} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go deleted file mode 100644 index bc5e78d4..00000000 --- a/vendor/github.com/pierrec/lz4/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build lz4debug - -package lz4 - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -const debugFlag = true - -func debug(args ...interface{}) { - _, file, line, _ := runtime.Caller(1) - file = filepath.Base(file) - - f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) - if f[len(f)-1] != '\n' { - f += "\n" - } - fmt.Fprintf(os.Stderr, f, args[1:]...) -} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go deleted file mode 100644 index 44211ad9..00000000 --- a/vendor/github.com/pierrec/lz4/debug_stub.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !lz4debug - -package lz4 - -const debugFlag = false - -func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go deleted file mode 100644 index 43cc14fb..00000000 --- a/vendor/github.com/pierrec/lz4/decode_amd64.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -package lz4 - -//go:noescape -func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s deleted file mode 100644 index 20fef397..00000000 --- a/vendor/github.com/pierrec/lz4/decode_amd64.s +++ /dev/null @@ -1,375 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// AX scratch -// BX scratch -// CX scratch -// DX token -// -// DI &dst -// SI &src -// R8 &dst + len(dst) -// R9 &src + len(src) -// R11 &dst -// R12 short output end -// R13 short input end -// func decodeBlock(dst, src []byte) int -// using 50 bytes of stack currently -TEXT ·decodeBlock(SB), NOSPLIT, $64-56 - MOVQ dst_base+0(FP), DI - MOVQ DI, R11 - MOVQ dst_len+8(FP), R8 - ADDQ DI, R8 - - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R9 - ADDQ SI, R9 - - // shortcut ends - // short output end - MOVQ R8, R12 - SUBQ $32, R12 - // short input end - MOVQ R9, R13 - SUBQ $16, R13 - -loop: - // for si < len(src) - CMPQ SI, R9 - JGE end - - // token := uint32(src[si]) - MOVBQZX (SI), DX - INCQ SI - - // lit_len = token >> 4 - // if lit_len > 0 - // CX = lit_len - MOVQ DX, CX - SHRQ $4, CX - - // if lit_len != 0xF - CMPQ CX, $0xF - JEQ lit_len_loop_pre - CMPQ DI, R12 - JGE lit_len_loop_pre - CMPQ SI, R13 - JGE lit_len_loop_pre - - // copy shortcut - - // A two-stage shortcut for the most common case: - // 1) If the literal length is 0..14, and there is enough space, - // enter the shortcut and copy 16 bytes on behalf of the literals - // (in the fast mode, only 8 bytes can be safely copied this way). - // 2) Further if the match length is 4..18, copy 18 bytes in a similar - // manner; but we ensure that there's enough space in the output for - // those 18 bytes earlier, upon entering the shortcut (in other words, - // there is a combined check for both stages). - - // copy literal - MOVOU (SI), X0 - MOVOU X0, (DI) - ADDQ CX, DI - ADDQ CX, SI - - MOVQ DX, CX - ANDQ $0xF, CX - - // The second stage: prepare for match copying, decode full info. - // If it doesn't work out, the info won't be wasted. 
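// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): the pattern
// behind debug.go and debug_stub.go above. Two files declare the same symbols
// under opposite build tags, so debug() calls and `if debugFlag` branches are
// compiled out of normal builds and only do work with `go build -tags lz4debug`.
// The file and package names below are hypothetical.

//go:build lz4debug

package example

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

const debugFlag = true

// debug prints the caller's file:line followed by the formatted message,
// mirroring the helper the lz4 package guards behind the same tag.
func debug(format string, args ...interface{}) {
	_, file, line, _ := runtime.Caller(1)
	fmt.Fprintf(os.Stderr, "LZ4: %s:%d "+format+"\n",
		append([]interface{}{filepath.Base(file), line}, args...)...)
}

// A sibling file carrying `//go:build !lz4debug` would declare
// `const debugFlag = false` and an empty debug(), so release builds pay nothing.
// ---------------------------------------------------------------------------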
- // offset := uint16(data[:2]) - MOVWQZX (SI), DX - ADDQ $2, SI - - MOVQ DI, AX - SUBQ DX, AX - CMPQ AX, DI - JGT err_short_buf - - // if we can't do the second stage then jump straight to read the - // match length, we already have the offset. - CMPQ CX, $0xF - JEQ match_len_loop_pre - CMPQ DX, $8 - JLT match_len_loop_pre - CMPQ AX, R11 - JLT err_short_buf - - // memcpy(op + 0, match + 0, 8); - MOVQ (AX), BX - MOVQ BX, (DI) - // memcpy(op + 8, match + 8, 8); - MOVQ 8(AX), BX - MOVQ BX, 8(DI) - // memcpy(op +16, match +16, 2); - MOVW 16(AX), BX - MOVW BX, 16(DI) - - ADDQ $4, DI // minmatch - ADDQ CX, DI - - // shortcut complete, load next token - JMP loop - -lit_len_loop_pre: - // if lit_len > 0 - CMPQ CX, $0 - JEQ offset - CMPQ CX, $0xF - JNE copy_literal - -lit_len_loop: - // for src[si] == 0xFF - CMPB (SI), $0xFF - JNE lit_len_finalise - - // bounds check src[si+1] - MOVQ SI, AX - ADDQ $1, AX - CMPQ AX, R9 - JGT err_short_buf - - // lit_len += 0xFF - ADDQ $0xFF, CX - INCQ SI - JMP lit_len_loop - -lit_len_finalise: - // lit_len += int(src[si]) - // si++ - MOVBQZX (SI), AX - ADDQ AX, CX - INCQ SI - -copy_literal: - // bounds check src and dst - MOVQ SI, AX - ADDQ CX, AX - CMPQ AX, R9 - JGT err_short_buf - - MOVQ DI, AX - ADDQ CX, AX - CMPQ AX, R8 - JGT err_short_buf - - // whats a good cut off to call memmove? - CMPQ CX, $16 - JGT memmove_lit - - // if len(dst[di:]) < 16 - MOVQ R8, AX - SUBQ DI, AX - CMPQ AX, $16 - JLT memmove_lit - - // if len(src[si:]) < 16 - MOVQ R9, AX - SUBQ SI, AX - CMPQ AX, $16 - JLT memmove_lit - - MOVOU (SI), X0 - MOVOU X0, (DI) - - JMP finish_lit_copy - -memmove_lit: - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - // spill - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) // need len to inc SI, DI after - MOVB DX, 48(SP) - CALL runtime·memmove(SB) - - // restore registers - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVB 48(SP), DX - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - -finish_lit_copy: - ADDQ CX, SI - ADDQ CX, DI - - CMPQ SI, R9 - JGE end - -offset: - // CX := mLen - // free up DX to use for offset - MOVQ DX, CX - - MOVQ SI, AX - ADDQ $2, AX - CMPQ AX, R9 - JGT err_short_buf - - // offset - // DX := int(src[si]) | int(src[si+1])<<8 - MOVWQZX (SI), DX - ADDQ $2, SI - - // 0 offset is invalid - CMPQ DX, $0 - JEQ err_corrupt - - ANDB $0xF, CX - -match_len_loop_pre: - // if mlen != 0xF - CMPB CX, $0xF - JNE copy_match - -match_len_loop: - // for src[si] == 0xFF - // lit_len += 0xFF - CMPB (SI), $0xFF - JNE match_len_finalise - - // bounds check src[si+1] - MOVQ SI, AX - ADDQ $1, AX - CMPQ AX, R9 - JGT err_short_buf - - ADDQ $0xFF, CX - INCQ SI - JMP match_len_loop - -match_len_finalise: - // lit_len += int(src[si]) - // si++ - MOVBQZX (SI), AX - ADDQ AX, CX - INCQ SI - -copy_match: - // mLen += minMatch - ADDQ $4, CX - - // check we have match_len bytes left in dst - // di+match_len < len(dst) - MOVQ DI, AX - ADDQ CX, AX - CMPQ AX, R8 - JGT err_short_buf - - // DX = offset - // CX = match_len - // BX = &dst + (di - offset) - MOVQ DI, BX - SUBQ DX, BX - - // check BX is within dst - // if BX < &dst - CMPQ BX, R11 - JLT err_short_buf - - // if offset + match_len < di - MOVQ BX, AX - ADDQ CX, AX - CMPQ DI, AX - JGT copy_interior_match - - // AX := len(dst[:di]) - // MOVQ DI, AX - // SUBQ R11, AX - - // copy 16 bytes at a time - // if di-offset < 16 
copy 16-(di-offset) bytes to di - // then do the remaining - -copy_match_loop: - // for match_len >= 0 - // dst[di] = dst[i] - // di++ - // i++ - MOVB (BX), AX - MOVB AX, (DI) - INCQ DI - INCQ BX - DECQ CX - - CMPQ CX, $0 - JGT copy_match_loop - - JMP loop - -copy_interior_match: - CMPQ CX, $16 - JGT memmove_match - - // if len(dst[di:]) < 16 - MOVQ R8, AX - SUBQ DI, AX - CMPQ AX, $16 - JLT memmove_match - - MOVOU (BX), X0 - MOVOU X0, (DI) - - ADDQ CX, DI - JMP loop - -memmove_match: - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ BX, 8(SP) - MOVQ CX, 16(SP) - // spill - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) // need len to inc SI, DI after - CALL runtime·memmove(SB) - - // restore registers - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 // TODO: make these sensible numbers - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - - ADDQ CX, DI - JMP loop - -err_corrupt: - MOVQ $-1, ret+48(FP) - RET - -err_short_buf: - MOVQ $-2, ret+48(FP) - RET - -end: - SUBQ R11, DI - MOVQ DI, ret+48(FP) - RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go deleted file mode 100644 index 919888ed..00000000 --- a/vendor/github.com/pierrec/lz4/decode_other.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build !amd64 appengine !gc noasm - -package lz4 - -func decodeBlock(dst, src []byte) (ret int) { - const hasError = -2 - defer func() { - if recover() != nil { - ret = hasError - } - }() - - var si, di int - for { - // Literals and match lengths (token). - b := int(src[si]) - si++ - - // Literals. - if lLen := b >> 4; lLen > 0 { - switch { - case lLen < 0xF && si+16 < len(src): - // Shortcut 1 - // if we have enough room in src and dst, and the literals length - // is small enough (0..14) then copy all 16 bytes, even if not all - // are part of the literals. - copy(dst[di:], src[si:si+16]) - si += lLen - di += lLen - if mLen := b & 0xF; mLen < 0xF { - // Shortcut 2 - // if the match length (4..18) fits within the literals, then copy - // all 18 bytes, even if not all are part of the literals. - mLen += 4 - if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { - i := di - offset - end := i + 18 - if end > len(dst) { - // The remaining buffer may not hold 18 bytes. - // See https://github.com/pierrec/lz4/issues/51. - end = len(dst) - } - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue - } - } - case lLen == 0xF: - for src[si] == 0xFF { - lLen += 0xFF - si++ - } - lLen += int(src[si]) - si++ - fallthrough - default: - copy(dst[di:di+lLen], src[si:si+lLen]) - si += lLen - di += lLen - } - } - if si >= len(src) { - return di - } - - offset := int(src[si]) | int(src[si+1])<<8 - if offset == 0 { - return hasError - } - si += 2 - - // Match. - mLen := b & 0xF - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - si++ - } - mLen += int(src[si]) - si++ - } - mLen += minMatch - - // Copy the match. - expanded := dst[di-offset:] - if mLen > offset { - // Efficiently copy the match dst[di-offset:di] into the dst slice. 
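// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): the
// overlapping-match trick used by the pure Go decoder below. When the match
// length exceeds the offset, the match refers to bytes the decoder is still
// producing, so the available `offset` bytes are replicated by doubling
// copies instead of a byte-at-a-time loop. Names here are hypothetical.
package main

import "fmt"

// expandMatch appends `length` bytes to dst that repeat the last `offset`
// bytes already present in dst (the LZ4 overlapping-match semantics).
func expandMatch(dst []byte, offset, length int) []byte {
	start := len(dst) - offset
	dst = append(dst, make([]byte, length)...)
	pattern := dst[start:]
	// Each copy doubles the amount of valid pattern data until `length` is covered.
	for n := offset; n < offset+length; n *= 2 {
		copy(pattern[n:], pattern[:n])
	}
	return dst
}

func main() {
	dst := []byte("abc")         // already-decoded output
	dst = expandMatch(dst, 3, 7) // match: offset 3, length 7
	fmt.Println(string(dst))     // abcabcabca
}
// ---------------------------------------------------------------------------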
- bytesToCopy := offset * (mLen / offset) - for n := offset; n <= bytesToCopy+offset; n *= 2 { - copy(expanded[n:], expanded[:n]) - } - di += bytesToCopy - mLen -= bytesToCopy - } - di += copy(dst[di:di+mLen], expanded[:mLen]) - } -} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go deleted file mode 100644 index 1c45d181..00000000 --- a/vendor/github.com/pierrec/lz4/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -package lz4 - -import ( - "errors" - "fmt" - "os" - rdebug "runtime/debug" -) - -var ( - // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed - // block is corrupted or the destination buffer is not large enough for the uncompressed data. - ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") - // ErrInvalid is returned when reading an invalid LZ4 archive. - ErrInvalid = errors.New("lz4: bad magic number") - // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. - ErrBlockDependency = errors.New("lz4: block dependency not supported") - // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. - ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") -) - -func recoverBlock(e *error) { - if r := recover(); r != nil && *e == nil { - if debugFlag { - fmt.Fprintln(os.Stderr, r) - rdebug.PrintStack() - } - *e = ErrInvalidSourceShortBuffer - } -} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go deleted file mode 100644 index 7a76a6bc..00000000 --- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -// (https://github.com/Cyan4973/XXH/) -package xxh32 - -import ( - "encoding/binary" -) - -const ( - prime1 uint32 = 2654435761 - prime2 uint32 = 2246822519 - prime3 uint32 = 3266489917 - prime4 uint32 = 668265263 - prime5 uint32 = 374761393 - - primeMask = 0xFFFFFFFF - prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 - prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 -) - -// XXHZero represents an xxhash32 object with seed 0. -type XXHZero struct { - v1 uint32 - v2 uint32 - v3 uint32 - v4 uint32 - totalLen uint64 - buf [16]byte - bufused int -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xxh XXHZero) Sum(b []byte) []byte { - h32 := xxh.Sum32() - return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -} - -// Reset resets the Hash to its initial state. -func (xxh *XXHZero) Reset() { - xxh.v1 = prime1plus2 - xxh.v2 = prime2 - xxh.v3 = 0 - xxh.v4 = prime1minus - xxh.totalLen = 0 - xxh.bufused = 0 -} - -// Size returns the number of bytes returned by Sum(). -func (xxh *XXHZero) Size() int { - return 4 -} - -// BlockSize gives the minimum number of bytes accepted by Write(). -func (xxh *XXHZero) BlockSize() int { - return 1 -} - -// Write adds input bytes to the Hash. -// It never returns an error. 
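// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): the two ways
// the xxh32 code above is used by the frame code — incrementally via XXHZero
// (a seed-0 streaming hasher) and in one shot via ChecksumZero; both yield the
// same 32-bit value for the same input. NOTE: xxh32 is an internal package, so
// this snippet would only compile from within the pierrec/lz4 module; it is
// shown purely for illustration.
package main

import (
	"fmt"

	"github.com/pierrec/lz4/internal/xxh32"
)

func main() {
	data := []byte("the quick brown fox jumps over the lazy dog")

	// Streaming: feed chunks as they arrive, then read the digest.
	var h xxh32.XXHZero
	h.Write(data[:20])
	h.Write(data[20:])
	streamed := h.Sum32()

	// One shot: hash a complete buffer.
	oneShot := xxh32.ChecksumZero(data)

	fmt.Printf("%08x %08x equal=%v\n", streamed, oneShot, streamed == oneShot)
}
// ---------------------------------------------------------------------------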
-func (xxh *XXHZero) Write(input []byte) (int, error) { - if xxh.totalLen == 0 { - xxh.Reset() - } - n := len(input) - m := xxh.bufused - - xxh.totalLen += uint64(n) - - r := len(xxh.buf) - m - if n < r { - copy(xxh.buf[m:], input) - xxh.bufused += len(input) - return n, nil - } - - p := 0 - // Causes compiler to work directly from registers instead of stack: - v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 - if m > 0 { - // some data left from previous update - copy(xxh.buf[xxh.bufused:], input[:r]) - xxh.bufused += len(input) - r - - // fast rotl(13) - buf := xxh.buf[:16] // BCE hint. - v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 - p = r - xxh.bufused = 0 - } - - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 - } - xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 - - copy(xxh.buf[xxh.bufused:], input[p:]) - xxh.bufused += len(input) - p - - return n, nil -} - -// Sum32 returns the 32 bits Hash value. -func (xxh *XXHZero) Sum32() uint32 { - h32 := uint32(xxh.totalLen) - if h32 >= 16 { - h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) - } else { - h32 += prime5 - } - - p := 0 - n := xxh.bufused - buf := xxh.buf - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 - h32 = rol17(h32) * prime4 - } - for ; p < n; p++ { - h32 += uint32(buf[p]) * prime5 - h32 = rol11(h32) * prime1 - } - - h32 ^= h32 >> 15 - h32 *= prime2 - h32 ^= h32 >> 13 - h32 *= prime3 - h32 ^= h32 >> 16 - - return h32 -} - -// ChecksumZero returns the 32bits Hash value. -func ChecksumZero(input []byte) uint32 { - n := len(input) - h32 := uint32(n) - - if n < 16 { - h32 += prime5 - } else { - v1 := prime1plus2 - v2 := prime2 - v3 := uint32(0) - v4 := prime1minus - p := 0 - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 - } - input = input[p:] - n -= p - h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - } - - p := 0 - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 - h32 = rol17(h32) * prime4 - } - for p < n { - h32 += uint32(input[p]) * prime5 - h32 = rol11(h32) * prime1 - p++ - } - - h32 ^= h32 >> 15 - h32 *= prime2 - h32 ^= h32 >> 13 - h32 *= prime3 - h32 ^= h32 >> 16 - - return h32 -} - -// Uint32Zero hashes x with seed 0. 
-func Uint32Zero(x uint32) uint32 { - h := prime5 + 4 + x*prime3 - h = rol17(h) * prime4 - h ^= h >> 15 - h *= prime2 - h ^= h >> 13 - h *= prime3 - h ^= h >> 16 - return h -} - -func rol1(u uint32) uint32 { - return u<<1 | u>>31 -} - -func rol7(u uint32) uint32 { - return u<<7 | u>>25 -} - -func rol11(u uint32) uint32 { - return u<<11 | u>>21 -} - -func rol12(u uint32) uint32 { - return u<<12 | u>>20 -} - -func rol13(u uint32) uint32 { - return u<<13 | u>>19 -} - -func rol17(u uint32) uint32 { - return u<<17 | u>>15 -} - -func rol18(u uint32) uint32 { - return u<<18 | u>>14 -} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go deleted file mode 100644 index a3284bdf..00000000 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package lz4 implements reading and writing lz4 compressed data (a frame), -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. -// -// Although the block level compression and decompression functions are exposed and are fully compatible -// with the lz4 block format definition, they are low level and should not be used directly. -// For a complete description of an lz4 compressed block, see: -// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -// -// See https://github.com/Cyan4973/lz4 for the reference C implementation. -// -package lz4 - -import ( - "math/bits" - "sync" -) - -const ( - // Extension is the LZ4 frame file name extension - Extension = ".lz4" - // Version is the LZ4 frame format version - Version = 1 - - frameMagic uint32 = 0x184D2204 - frameSkipMagic uint32 = 0x184D2A50 - frameMagicLegacy uint32 = 0x184C2102 - - // The following constants are used to setup the compression algorithm. - minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks - compressedBlockFlag = 1 << 31 - compressedBlockMask = compressedBlockFlag - 1 - - // hashLog determines the size of the hash table used to quickly find a previous match position. - // Its value influences the compression speed and memory usage, the lower the faster, - // but at the expense of the compression ratio. - // 16 seems to be the best compromise for fast compression. - hashLog = 16 - htSize = 1 << hashLog - - mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. -) - -// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -const ( - blockSize64K = 1 << (16 + 2*iota) - blockSize256K - blockSize1M - blockSize4M -) - -var ( - // Keep a pool of buffers for each valid block sizes. - bsMapValue = [...]*sync.Pool{ - newBufferPool(2 * blockSize64K), - newBufferPool(2 * blockSize256K), - newBufferPool(2 * blockSize1M), - newBufferPool(2 * blockSize4M), - } -) - -// newBufferPool returns a pool for buffers of the given size. -func newBufferPool(size int) *sync.Pool { - return &sync.Pool{ - New: func() interface{} { - return make([]byte, size) - }, - } -} - -// getBuffer returns a buffer to its pool. -func getBuffer(size int) []byte { - idx := blockSizeValueToIndex(size) - 4 - return bsMapValue[idx].Get().([]byte) -} - -// putBuffer returns a buffer to its pool. 
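// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): the mapping
// used above between the four legal block sizes and their frame-descriptor
// index (4..7). The index is derived from the power of two:
// size = 1 << (16 + 2*(index-4)), which is also how the per-size buffer pools
// are selected. The two helpers are restated here for a standalone demo.
package main

import (
	"fmt"
	"math/bits"
)

func blockSizeValueToIndex(size int) byte {
	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}

func blockSizeIndexToValue(i byte) int {
	return 1 << (16 + 2*uint(i))
}

func main() {
	for _, size := range []int{64 << 10, 256 << 10, 1 << 20, 4 << 20} {
		idx := blockSizeValueToIndex(size)
		fmt.Printf("%7d bytes -> index %d -> %7d bytes\n",
			size, idx, blockSizeIndexToValue(idx-4))
	}
}
// ---------------------------------------------------------------------------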
-func putBuffer(size int, buf []byte) { - if cap(buf) > 0 { - idx := blockSizeValueToIndex(size) - 4 - bsMapValue[idx].Put(buf[:cap(buf)]) - } -} -func blockSizeIndexToValue(i byte) int { - return 1 << (16 + 2*uint(i)) -} -func isValidBlockSize(size int) bool { - const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M - - return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 -} -func blockSizeValueToIndex(size int) byte { - return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) -} - -// Header describes the various flags that can be set on a Writer or obtained from a Reader. -// The default values match those of the LZ4 frame format definition -// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). -// -// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller's responsibility to check them if necessary. -type Header struct { - BlockChecksum bool // Compressed blocks checksum flag. - NoChecksum bool // Frame checksum flag. - BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. - Size uint64 // Frame total size. It is _not_ computed by the Writer. - CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). - done bool // Header processed flag (Read or Write and checked). -} - -// Reset reset internal status -func (h *Header) Reset() { - h.done = false -} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go deleted file mode 100644 index 9a0fb007..00000000 --- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build go1.10 - -package lz4 - -import ( - "fmt" - "strings" -) - -func (h Header) String() string { - var s strings.Builder - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go deleted file mode 100644 index 12c761a2..00000000 --- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build !go1.10 - -package lz4 - -import ( - "bytes" - "fmt" -) - -func (h Header) String() string { - var s bytes.Buffer - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go deleted file mode 100644 index 87dd72bd..00000000 --- a/vendor/github.com/pierrec/lz4/reader.go +++ /dev/null @@ -1,335 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// Reader implements the LZ4 frame decoder. -// The Header is set after the first call to Read(). 
-// The Header may change between Read() calls in case of concatenated frames. -type Reader struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - checksum xxh32.XXHZero // Frame hash. - skip int64 // Bytes to skip before next read. - dpos int64 // Position in dest -} - -// NewReader returns a new LZ4 frame decoder. -// No access to the underlying io.Reader is performed. -func NewReader(src io.Reader) *Reader { - r := &Reader{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. -func (z *Reader) readHeader(first bool) error { - defer z.checksum.Reset() - - buf := z.buf[:] - for { - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if !first && err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic == frameMagic { - break - } - if magic>>8 != frameSkipMagic>>8 { - return ErrInvalid - } - skipSize, err := z.readUint32() - if err != nil { - return err - } - z.pos += 4 - m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) - if err != nil { - return err - } - z.pos += m - } - - // Header. - if _, err := io.ReadFull(z.src, buf[:2]); err != nil { - return err - } - z.pos += 8 - - b := buf[0] - if v := b >> 6; v != Version { - return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) - } - if b>>5&1 == 0 { - return ErrBlockDependency - } - z.BlockChecksum = b>>4&1 > 0 - frameSize := b>>3&1 > 0 - z.NoChecksum = b>>2&1 == 0 - - bmsID := buf[1] >> 4 & 0x7 - if bmsID < 4 || bmsID > 7 { - return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) - } - bSize := blockSizeIndexToValue(bmsID - 4) - z.BlockMaxSize = bSize - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size id=%d size=%d", bmsID, bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - _, _ = z.checksum.Write(buf[0:2]) - - if frameSize { - buf := buf[:8] - if _, err := io.ReadFull(z.src, buf); err != nil { - return err - } - z.Size = binary.LittleEndian.Uint64(buf) - z.pos += 8 - _, _ = z.checksum.Write(buf) - } - - // Header checksum. - if _, err := io.ReadFull(z.src, buf[:1]); err != nil { - return err - } - z.pos++ - if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { - return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) - } - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. -// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. 
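// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): how the FLG
// byte of the frame descriptor parsed by readHeader above breaks down into
// fields. The value 0x64 corresponds to version 1, independent blocks, no
// block checksums, no content size, with a content checksum — the default a
// Writer emits.
package main

import "fmt"

func main() {
	const flg byte = 0x64 // 0b0110_0100

	version := flg >> 6 & 0x3        // bits 7-6: frame format version (must be 1)
	blockIndep := flg>>5&1 == 1      // bit 5: blocks are independent
	blockChecksum := flg>>4&1 == 1   // bit 4: each block is followed by a checksum
	contentSize := flg>>3&1 == 1     // bit 3: an 8-byte content size follows
	contentChecksum := flg>>2&1 == 1 // bit 2: the frame ends with a checksum

	fmt.Println(version, blockIndep, blockChecksum, contentSize, contentChecksum)
	// Output: 1 true false false true
}
// ---------------------------------------------------------------------------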
-func (z *Reader) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readHeader(true); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. - if debugFlag { - debug("reading block from writer") - } - // Reset uncompressed buffer - z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - - // Block length: 0 = end of frame, highest bit set: uncompressed. - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if bLen == 0 { - // End of frame reached. - if !z.NoChecksum { - // Validate the frame checksum. - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) - } - z.pos += 4 - if h := z.checksum.Sum32(); checksum != h { - return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) - } - } - - // Get ready for the next concatenated frame and keep the position. - pos := z.pos - z.Reset(z.src) - z.pos = pos - - // Since multiple frames can be concatenated, check for more. - return 0, z.readHeader(false) - } - - if debugFlag { - debug("raw block size %d", bLen) - } - if bLen&compressedBlockFlag > 0 { - // Uncompressed block. - bLen &= compressedBlockMask - if debugFlag { - debug("uncompressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - z.data = z.data[:bLen] - if _, err := io.ReadFull(z.src, z.data); err != nil { - return 0, err - } - z.pos += int64(bLen) - if z.OnBlockDone != nil { - z.OnBlockDone(int(bLen)) - } - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(z.data); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - } else { - // Compressed block. - if debugFlag { - debug("compressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(zdata); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - z.data = z.data[:n] - if z.OnBlockDone != nil { - z.OnBlockDone(n) - } - } - - if !z.NoChecksum { - _, _ = z.checksum.Write(z.data) - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - } - z.idx = 0 - } - - if z.skip > int64(len(z.data[z.idx:])) { - z.skip -= int64(len(z.data[z.idx:])) - z.dpos += int64(len(z.data[z.idx:])) - z.idx = len(z.data) - return 0, nil - } - - z.idx += int(z.skip) - z.dpos += z.skip - z.skip = 0 - - n := copy(buf, z.data[z.idx:]) - z.idx += n - z.dpos += int64(n) - if debugFlag { - debug("copied %d bytes to input", n) - } - - return n, nil -} - -// Seek implements io.Seeker, but supports seeking forward from the current -// position only. 
Any other seek will return an error. Allows skipping output -// bytes which aren't needed, which in some scenarios is faster than reading -// and discarding them. -// Note this may cause future calls to Read() to read 0 bytes if all of the -// data they would have returned is skipped. -func (z *Reader) Seek(offset int64, whence int) (int64, error) { - if offset < 0 || whence != io.SeekCurrent { - return z.dpos + z.skip, ErrUnsupportedSeek - } - z.skip += offset - return z.dpos + z.skip, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 - z.checksum.Reset() -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. -func (z *Reader) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go deleted file mode 100644 index 1670a77d..00000000 --- a/vendor/github.com/pierrec/lz4/reader_legacy.go +++ /dev/null @@ -1,207 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" -) - -// ReaderLegacy implements the LZ4Demo frame decoder. -// The Header is set after the first call to Read(). -type ReaderLegacy struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - lastBlock bool - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - skip int64 // Bytes to skip before next read. - dpos int64 // Position in dest -} - -// NewReaderLegacy returns a new LZ4Demo frame decoder. -// No access to the underlying io.Reader is performed. -func NewReaderLegacy(src io.Reader) *ReaderLegacy { - r := &ReaderLegacy{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. -func (z *ReaderLegacy) readLegacyHeader() error { - z.lastBlock = false - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic != frameMagicLegacy { - return ErrInvalid - } - z.pos += 4 - - // Legacy has fixed 8MB blocksizes - // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame - bSize := blockSize4M * 2 - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size size=%d", bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. 
-// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. -func (z *ReaderLegacy) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readLegacyHeader(); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. - if debugFlag { - debug(" reading block from writer %d %d", z.idx, blockSize4M*2) - } - - // Reset uncompressed buffer - z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos) - } - z.pos += 4 - - // Legacy blocks are always compressed, even when detrimental - if debugFlag { - debug(" compressed block size %d", bLen) - } - - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - - z.data = z.data[:n] - if z.OnBlockDone != nil { - z.OnBlockDone(n) - } - - z.idx = 0 - - // Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this - // it means we've reached the end... - if n < blockSize4M*2 { - z.lastBlock = true - } - } - - if z.skip > int64(len(z.data[z.idx:])) { - z.skip -= int64(len(z.data[z.idx:])) - z.dpos += int64(len(z.data[z.idx:])) - z.idx = len(z.data) - return 0, nil - } - - z.idx += int(z.skip) - z.dpos += z.skip - z.skip = 0 - - n := copy(buf, z.data[z.idx:]) - z.idx += n - z.dpos += int64(n) - if debugFlag { - debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data)) - } - if z.lastBlock && len(z.data) == z.idx { - return n, io.EOF - } - return n, nil -} - -// Seek implements io.Seeker, but supports seeking forward from the current -// position only. Any other seek will return an error. Allows skipping output -// bytes which aren't needed, which in some scenarios is faster than reading -// and discarding them. -// Note this may cause future calls to Read() to read 0 bytes if all of the -// data they would have returned is skipped. -func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) { - if offset < 0 || whence != io.SeekCurrent { - return z.dpos + z.skip, ErrUnsupportedSeek - } - z.skip += offset - return z.dpos + z.skip, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *ReaderLegacy) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. 
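// ---------------------------------------------------------------------------
// Illustrative sketch (editorial, not part of the vendored diff): consuming a
// frame with the Reader documented above. OnBlockDone reports progress per
// block, and Seek (forward, from the current position only) skips decompressed
// output without copying it. The file name is hypothetical.
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	f, err := os.Open("payload.lz4")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	zr.OnBlockDone = func(n int) { fmt.Printf("decoded block of %d bytes\n", n) }

	// Skip the first 1 KiB of decompressed output, then stream the rest.
	if _, err := zr.Seek(1024, io.SeekCurrent); err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		panic(err)
	}
}
// ---------------------------------------------------------------------------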
-func (z *ReaderLegacy) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go deleted file mode 100644 index f066d563..00000000 --- a/vendor/github.com/pierrec/lz4/writer.go +++ /dev/null @@ -1,422 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "runtime" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// zResult contains the results of compressing a block. -type zResult struct { - size uint32 // Block header - data []byte // Compressed data - checksum uint32 // Data checksum -} - -// Writer implements the LZ4 frame encoder. -type Writer struct { - Header - // Handler called when a block has been successfully written out. - // It provides the number of bytes written. - OnBlockDone func(size int) - - buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes - dst io.Writer // Destination. - checksum xxh32.XXHZero // Frame checksum. - data []byte // Data to be compressed + buffer for compressed data. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). - - // For concurrency. - c chan chan zResult // Channel for block compression goroutines and writer goroutine. - err error // Any error encountered while writing to the underlying destination. -} - -// NewWriter returns a new LZ4 frame encoder. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriter(dst io.Writer) *Writer { - z := new(Writer) - z.Reset(dst) - return z -} - -// WithConcurrency sets the number of concurrent go routines used for compression. -// A negative value sets the concurrency to GOMAXPROCS. -func (z *Writer) WithConcurrency(n int) *Writer { - switch { - case n == 0 || n == 1: - z.c = nil - return z - case n < 0: - n = runtime.GOMAXPROCS(0) - } - z.c = make(chan chan zResult, n) - // Writer goroutine managing concurrent block compression goroutines. - go func() { - // Process next block compression item. - for c := range z.c { - // Read the next compressed block result. - // Waiting here ensures that the blocks are output in the order they were sent. - // The incoming channel is always closed as it indicates to the caller that - // the block has been processed. - res := <-c - n := len(res.data) - if n == 0 { - // Notify the block compression routine that we are done with its result. - // This is used when a sentinel block is sent to terminate the compression. - close(c) - return - } - // Write the block. - if err := z.writeUint32(res.size); err != nil && z.err == nil { - z.err = err - } - if _, err := z.dst.Write(res.data); err != nil && z.err == nil { - z.err = err - } - if z.BlockChecksum { - if err := z.writeUint32(res.checksum); err != nil && z.err == nil { - z.err = err - } - } - // It is now safe to release the buffer as no longer in use by any goroutine. - putBuffer(cap(res.data), res.data) - if h := z.OnBlockDone; h != nil { - h(n) - } - close(c) - } - }() - return z -} - -// newBuffers instantiates new buffers which size matches the one in Header. -// The returned buffers are for decompression and compression respectively. 
-func (z *Writer) newBuffers() { - bSize := z.Header.BlockMaxSize - buf := getBuffer(bSize) - z.data = buf[:bSize] // Uncompressed buffer is the first half. -} - -// freeBuffers puts the writer's buffers back to the pool. -func (z *Writer) freeBuffers() { - // Put the buffer back into the pool, if any. - putBuffer(z.Header.BlockMaxSize, z.data) - z.data = nil -} - -// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. -func (z *Writer) writeHeader() error { - // Default to 4Mb if BlockMaxSize is not set. - if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = blockSize4M - } - // The only option that needs to be validated. - bSize := z.Header.BlockMaxSize - if !isValidBlockSize(z.Header.BlockMaxSize) { - return fmt.Errorf("lz4: invalid block max size: %d", bSize) - } - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - z.newBuffers() - z.idx = 0 - - // Size is optional. - buf := z.buf[:] - - // Set the fixed size data: magic number, block max size and flags. - binary.LittleEndian.PutUint32(buf[0:], frameMagic) - flg := byte(Version << 6) - flg |= 1 << 5 // No block dependency. - if z.Header.BlockChecksum { - flg |= 1 << 4 - } - if z.Header.Size > 0 { - flg |= 1 << 3 - } - if !z.Header.NoChecksum { - flg |= 1 << 2 - } - buf[4] = flg - buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 - - // Current buffer size: magic(4) + flags(1) + block max size (1). - n := 6 - // Optional items. - if z.Header.Size > 0 { - binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) - n += 8 - } - - // The header checksum includes the flags, block max size and optional Size. - buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) - z.checksum.Reset() - - // Header ready, write it out. - if _, err := z.dst.Write(buf[0 : n+1]); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. -func (z *Writer) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. - if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// compressBlock compresses a block. 
-func (z *Writer) compressBlock(data []byte) error { - if !z.NoChecksum { - _, _ = z.checksum.Write(data) - } - - if z.c != nil { - c := make(chan zResult) - z.c <- c // Send now to guarantee order - - // get a buffer from the pool and copy the data over - block := getBuffer(z.Header.BlockMaxSize)[:len(data)] - copy(block, data) - - go writerCompressBlock(c, z.Header, block) - return nil - } - - zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] - // The compressed block size cannot exceed the input's. - var zn int - - if level := z.Header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - zn, _ = CompressBlock(data, zdata, z.hashtable[:]) - } - - var bLen uint32 - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - if zn > 0 && zn < len(data) { - // Compressible and compressed size smaller than uncompressed: ok! - bLen = uint32(zn) - zdata = zdata[:zn] - } else { - // Uncompressed block. - bLen = uint32(len(data)) | compressedBlockFlag - zdata = data - } - if debugFlag { - debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) - } - - // Write the block. - if err := z.writeUint32(bLen); err != nil { - return err - } - written, err := z.dst.Write(zdata) - if err != nil { - return err - } - if h := z.OnBlockDone; h != nil { - h(written) - } - - if !z.BlockChecksum { - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - return nil - } - checksum := xxh32.ChecksumZero(zdata) - if debugFlag { - debug("block checksum %x", checksum) - defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() - } - return z.writeUint32(checksum) -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *Writer) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - data := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])] - copy(data, z.data[:z.idx]) - - z.idx = 0 - if z.c == nil { - return z.compressBlock(data) - } - if !z.NoChecksum { - _, _ = z.checksum.Write(data) - } - c := make(chan zResult) - z.c <- c - writerCompressBlock(c, z.Header, data) - return nil -} - -func (z *Writer) close() error { - if z.c == nil { - return nil - } - // Send a sentinel block (no data to compress) to terminate the writer main goroutine. - c := make(chan zResult) - z.c <- c - c <- zResult{} - // Wait for the main goroutine to complete. - <-c - // At this point the main goroutine has shut down or is about to return. - z.c = nil - return z.err -} - -// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. -func (z *Writer) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - if err := z.Flush(); err != nil { - return err - } - if err := z.close(); err != nil { - return err - } - z.freeBuffers() - - if debugFlag { - debug("writing last empty block") - } - if err := z.writeUint32(0); err != nil { - return err - } - if z.NoChecksum { - return nil - } - checksum := z.checksum.Sum32() - if debugFlag { - debug("stream checksum %x", checksum) - } - return z.writeUint32(checksum) -} - -// Reset clears the state of the Writer z such that it is equivalent to its -// initial state from NewWriter, but instead writing to w. -// No access to the underlying io.Writer is performed. 
-func (z *Writer) Reset(w io.Writer) { - n := cap(z.c) - _ = z.close() - z.freeBuffers() - z.Header.Reset() - z.dst = w - z.checksum.Reset() - z.idx = 0 - z.err = nil - // reset hashtable to ensure deterministic output. - for i := range z.hashtable { - z.hashtable[i] = 0 - } - z.WithConcurrency(n) -} - -// writeUint32 writes a uint32 to the underlying writer. -func (z *Writer) writeUint32(x uint32) error { - buf := z.buf[:4] - binary.LittleEndian.PutUint32(buf, x) - _, err := z.dst.Write(buf) - return err -} - -// writerCompressBlock compresses data into a pooled buffer and writes its result -// out to the input channel. -func writerCompressBlock(c chan zResult, header Header, data []byte) { - zdata := getBuffer(header.BlockMaxSize) - // The compressed block size cannot exceed the input's. - var zn int - if level := header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - var hashTable [winSize]int - zn, _ = CompressBlock(data, zdata, hashTable[:]) - } - var res zResult - if zn > 0 && zn < len(data) { - res.size = uint32(zn) - res.data = zdata[:zn] - // release the uncompressed block since it is not used anymore - putBuffer(header.BlockMaxSize, data) - } else { - res.size = uint32(len(data)) | compressedBlockFlag - res.data = data - // release the compressed block since it was not used - putBuffer(header.BlockMaxSize, zdata) - } - if header.BlockChecksum { - res.checksum = xxh32.ChecksumZero(res.data) - } - c <- res -} diff --git a/vendor/github.com/pierrec/lz4/writer_legacy.go b/vendor/github.com/pierrec/lz4/writer_legacy.go deleted file mode 100644 index ca8dc8c7..00000000 --- a/vendor/github.com/pierrec/lz4/writer_legacy.go +++ /dev/null @@ -1,182 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "io" -) - -// WriterLegacy implements the LZ4Demo frame decoder. -type WriterLegacy struct { - Header - // Handler called when a block has been successfully read. - // It provides the number of bytes read. - OnBlockDone func(size int) - - dst io.Writer // Destination. - data []byte // Data to be compressed + buffer for compressed data. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). -} - -// NewWriterLegacy returns a new LZ4 encoder for the legacy frame format. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriterLegacy(dst io.Writer) *WriterLegacy { - z := new(WriterLegacy) - z.Reset(dst) - return z -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. -func (z *WriterLegacy) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. 
- if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// writeHeader builds and writes the header to the underlying io.Writer. -func (z *WriterLegacy) writeHeader() error { - // Legacy has fixed 8MB blocksizes - // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame - bSize := 2 * blockSize4M - - buf := make([]byte, 2*bSize, 2*bSize) - z.data = buf[:bSize] // Uncompressed buffer is the first half. - - z.idx = 0 - - // Header consists of one mageic number, write it out. - if err := binary.Write(z.dst, binary.LittleEndian, frameMagicLegacy); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// compressBlock compresses a block. -func (z *WriterLegacy) compressBlock(data []byte) error { - bSize := 2 * blockSize4M - zdata := z.data[bSize:cap(z.data)] - // The compressed block size cannot exceed the input's. - var zn int - - if level := z.Header.CompressionLevel; level != 0 { - zn, _ = CompressBlockHC(data, zdata, level) - } else { - zn, _ = CompressBlock(data, zdata, z.hashtable[:]) - } - - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - zdata = zdata[:zn] - - // Write the block. - if err := binary.Write(z.dst, binary.LittleEndian, uint32(zn)); err != nil { - return err - } - written, err := z.dst.Write(zdata) - if err != nil { - return err - } - if h := z.OnBlockDone; h != nil { - h(written) - } - return nil -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *WriterLegacy) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - data := z.data[:z.idx] - z.idx = 0 - return z.compressBlock(data) -} - -// Close closes the WriterLegacy, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. -func (z *WriterLegacy) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - if err := z.Flush(); err != nil { - return err - } - - if debugFlag { - debug("writing last empty block") - } - - return nil -} - -// Reset clears the state of the WriterLegacy z such that it is equivalent to its -// initial state from NewWriterLegacy, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *WriterLegacy) Reset(w io.Writer) { - z.Header.Reset() - z.dst = w - z.idx = 0 - // reset hashtable to ensure deterministic output. 
- for i := range z.hashtable { - z.hashtable[i] = 0 - } -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go index d158000e..1a291532 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go @@ -6,8 +6,8 @@ package cpu import ( "context" "regexp" - "strings" "strconv" + "strings" "github.com/shirou/gopsutil/v3/internal/common" ) @@ -28,19 +28,19 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { } ret := TimesStat{CPU: "cpu-total"} - h := whiteSpaces.Split(lines[len(lines)-3], -1) // headers - v := whiteSpaces.Split(lines[len(lines)-2], -1) // values + h := whiteSpaces.Split(lines[len(lines)-3], -1) // headers + v := whiteSpaces.Split(lines[len(lines)-2], -1) // values for i, header := range h { if t, err := strconv.ParseFloat(v[i], 64); err == nil { switch header { - case `%usr`: - ret.User = t - case `%sys`: - ret.System = t - case `%wio`: - ret.Iowait = t - case `%idle`: - ret.Idle = t + case `%usr`: + ret.User = t + case `%sys`: + ret.System = t + case `%wio`: + ret.Iowait = t + case `%idle`: + ret.Idle = t } } } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go index 7acb258d..41f395e5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" + "github.com/shoenig/go-m1cpu" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) @@ -85,11 +86,15 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { c.CacheSize = int32(cacheSize) c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") - // Use the rated frequency of the CPU. This is a static value and does not - // account for low power or Turbo Boost modes. - cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") - if err == nil { - c.Mhz = float64(cpuFrequency) / 1000000.0 + if m1cpu.IsAppleSilicon() { + c.Mhz = float64(m1cpu.PCoreHz() / 1_000_000) + } else { + // Use the rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. 
+ cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + if err == nil { + c.Mhz = float64(cpuFrequency) / 1000000.0 + } } return append(ret, c), nil diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go index a60e462d..d4c575e8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go @@ -11,8 +11,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" + + "github.com/shirou/gopsutil/v3/internal/common" ) var ClocksPerSec = float64(100) @@ -190,7 +191,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { switch key { case "Processor": processorName = value - case "processor": + case "processor", "cpu number": if c.CPU >= 0 { finishCPUInfo(&c) ret = append(ret, c) @@ -203,6 +204,9 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { c.CPU = int32(t) case "vendorId", "vendor_id": c.VendorID = value + if strings.Contains(value, "S390") { + processorName = "S390" + } case "CPU implementer": if v, err := strconv.ParseUint(value, 0, 8); err == nil { switch v { @@ -253,7 +257,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { } } } - case "model name", "cpu": + case "Model Name", "model name", "cpu": c.ModelName = value if strings.Contains(value, "POWER8") || strings.Contains(value, "POWER7") { @@ -273,7 +277,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return ret, err } c.Stepping = int32(t) - case "cpu MHz", "clock": + case "cpu MHz", "clock", "cpu MHz dynamic": // treat this as the fallback value, thus we ignore error if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil { c.Mhz = t diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk.go index dd4cc1d5..0d4b2534 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk.go @@ -9,6 +9,8 @@ import ( var invoke common.Invoker = common.Invoke{} +type Warnings = common.Warnings + type UsageStat struct { Path string `json:"path"` Fstype string `json:"fstype"` diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go index 190a2d90..4f93c752 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go @@ -8,8 +8,8 @@ import ( "regexp" "strings" - "golang.org/x/sys/unix" "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/unix" ) var whiteSpaces = regexp.MustCompile(`\s+`) @@ -17,11 +17,11 @@ var startBlank = regexp.MustCompile(`^\s+`) var ignoreFSType = map[string]bool{"procfs": true} var FSType = map[int]string{ - 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc", - 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs", - 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs", - 39: "ahafs", 40: "sterm-nfs", 41: "asmfs", - } + 0: "jfs2", 1: "namefs", 2: "nfs", 3: "jfs", 5: "cdrom", 6: "proc", + 16: "special-fs", 17: "cache-fs", 18: "nfs3", 19: "automount-fs", 20: "pool-fs", 32: "vxfs", + 33: "veritas-fs", 34: "udfs", 35: "nfs4", 36: "nfs4-pseudo", 37: "smbfs", 38: "mcr-pseudofs", + 39: "ahafs", 40: "sterm-nfs", 41: "asmfs", +} func PartitionsWithContext(ctx context.Context, 
all bool) ([]PartitionStat, error) { var ret []PartitionStat @@ -42,7 +42,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro start := 0 finished := false for pos, ch := range lines[1] { - if ch == ' ' && ! finished { + if ch == ' ' && !finished { name := strings.TrimSpace(lines[0][start:pos]) colidx[name] = idx finished = true diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go index 753ce9ac..9b53106c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_freebsd.go @@ -12,9 +12,8 @@ import ( "strconv" "strings" - "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/unix" ) // PartitionsWithContext returns disk partition. diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go index 3911af9c..14712f9e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go @@ -16,8 +16,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v3/internal/common" ) const ( @@ -341,7 +342,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro } if strings.HasPrefix(d.Device, "/dev/mapper/") { - devpath, err := filepath.EvalSymlinks(common.HostDev(strings.Replace(d.Device, "/dev", "", -1))) + devpath, err := filepath.EvalSymlinks(common.HostDev(strings.Replace(d.Device, "/dev", "", 1))) if err == nil { d.Device = devpath } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go index 5dfd1ca9..8a1a28d6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "fmt" + "sync" "syscall" "unsafe" @@ -15,8 +16,6 @@ import ( "golang.org/x/sys/windows/registry" ) -type Warnings = common.Warnings - var ( procGetDiskFreeSpaceExW = common.Modkernel32.NewProc("GetDiskFreeSpaceExW") procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW") @@ -90,12 +89,20 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro var ret []PartitionStat retChan := make(chan []PartitionStat) errChan := make(chan error) - defer close(retChan) - defer close(errChan) - lpBuffer := make([]byte, 254) + var waitgrp sync.WaitGroup + waitgrp.Add(1) + defer waitgrp.Done() + f := func() { + defer func() { + waitgrp.Wait() + // fires when this func and the outside func finishes. + close(errChan) + close(retChan) + }() + diskret, _, err := procGetLogicalDriveStringsW.Call( uintptr(len(lpBuffer)), uintptr(unsafe.Pointer(&lpBuffer[0]))) diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host.go b/vendor/github.com/shirou/gopsutil/v3/host/host.go index f363eed8..c7e84e3a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host.go @@ -11,6 +11,8 @@ import ( "github.com/shirou/gopsutil/v3/internal/common" ) +type Warnings = common.Warnings + var invoke common.Invoker = common.Invoke{} // A HostInfoStat describes the host status. 
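The cpu_darwin.go hunk above switches macOS frequency detection to github.com/shoenig/go-m1cpu when running on Apple Silicon, since the hw.cpufrequency sysctl it previously relied on is not reported on darwin/arm64. A minimal sketch of how a caller of the vendored gopsutil v3 API observes that change — the program below is illustrative only and not part of this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/v3/cpu"
)

func main() {
	// On darwin/arm64 builds with cgo, InfoStat.Mhz is now derived from
	// m1cpu.PCoreHz(); on Intel Macs it still comes from the
	// hw.cpufrequency sysctl, per the hunk above.
	infos, err := cpu.Info()
	if err != nil {
		log.Fatal(err)
	}
	for _, ci := range infos {
		fmt.Printf("%s: %.0f MHz\n", ci.ModelName, ci.Mhz)
	}
}
```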
diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go index 5c07c697..adce92e2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go @@ -15,11 +15,10 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" -) -type Warnings = common.Warnings + "github.com/shirou/gopsutil/v3/internal/common" +) type lsbStruct struct { ID string @@ -207,6 +206,12 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil } else if lsb.ID == "LinuxMint" { platform = "linuxmint" version = lsb.Release + } else if lsb.ID == "Kylin" { + platform = "Kylin" + version = lsb.Release + } else if lsb.ID == `"Cumulus Linux"` { + platform = "cumuluslinux" + version = lsb.Release } else { if common.PathExistsWithContents("/usr/bin/raspi-config") { platform = "raspbian" @@ -284,7 +289,7 @@ func PlatformInformationWithContext(ctx context.Context) (platform string, famil platform = strings.Trim(platform, `"`) switch platform { - case "debian", "ubuntu", "linuxmint", "raspbian": + case "debian", "ubuntu", "linuxmint", "raspbian", "Kylin", "cumuluslinux": family = "debian" case "fedora": family = "fedora" diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go new file mode 100644 index 00000000..edf1be59 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux_loong64.go @@ -0,0 +1,48 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs host/types_linux.go + +//go:build linux && loong64 +// +build linux,loong64 + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x190 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type ( + utmp struct { + Type int16 + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int64 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 + Pad_cgo_0 [4]byte + } + exit_status struct { + Termination int16 + Exit int16 + } + timeval struct { + Sec int64 + Usec int64 + } +) diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go index 446c3597..5e8d43db 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go @@ -21,6 +21,7 @@ package common // high-performance serialization, especially for large data structures, // should look at more advanced solutions such as the encoding/gob // package or protocol buffers. 
+ import ( "errors" "io" diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index 323402dc..c1e96ca7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -6,6 +6,7 @@ package common // - linux (amd64, arm) // - freebsd (amd64) // - windows (amd64) + import ( "bufio" "bytes" diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go index 295b70bf..301b2315 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go @@ -218,9 +218,12 @@ func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, con } // Convert paths using native DOS format like: -// "\Device\HarddiskVolume1\Windows\systemew\file.txt" +// +// "\Device\HarddiskVolume1\Windows\systemew\file.txt" +// // into: -// "C:\Windows\systemew\file.txt" +// +// "C:\Windows\systemew\file.txt" func ConvertDOSPath(p string) string { rawDrive := strings.Join(strings.Split(p, `\`)[:3], `\`) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go index 09ffd8ed..fc9e4922 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go @@ -71,7 +71,7 @@ func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) swap.Total = t * pagesize } if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { - swap.Free = swap.Total - t * pagesize + swap.Free = swap.Total - t*pagesize } } break diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go index 44543ef7..9a56785b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go @@ -8,9 +8,8 @@ import ( "errors" "unsafe" - "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/unix" ) func VirtualMemory() (*VirtualMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go index 9a5d693b..56fb1cc6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go @@ -14,8 +14,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v3/internal/common" ) type VirtualMemoryExStat struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go index 97644923..e37d5abe 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go index 8cf8c914..8c34f881 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go @@ -18,14 +18,14 @@ func IOCountersWithContext(ctx context.Context, 
pernic bool) ([]IOCountersStat, iocounters := make([]IOCountersStat, 0, len(ifs)) for _, netif := range ifs { n := IOCountersStat{ - Name: netif.Name, - BytesSent: uint64(netif.OBytes), - BytesRecv: uint64(netif.IBytes), + Name: netif.Name, + BytesSent: uint64(netif.OBytes), + BytesRecv: uint64(netif.IBytes), PacketsSent: uint64(netif.OPackets), PacketsRecv: uint64(netif.IPackets), - Errin: uint64(netif.OErrors), - Errout: uint64(netif.IErrors), - Dropout: uint64(netif.XmitDrops), + Errin: uint64(netif.OErrors), + Errout: uint64(netif.IErrors), + Dropout: uint64(netif.XmitDrops), } iocounters = append(iocounters, n) } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go index 731c8f97..68b26bdc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go @@ -211,7 +211,8 @@ func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename stri // Return a list of network connections // Available kind: -// reference to netConnectionKindMap +// +// reference to netConnectionKindMap func Connections(kind string) ([]ConnectionStat, error) { return ConnectionsWithContext(context.Background(), kind) } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go index 779f8126..a123ccf9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go @@ -69,7 +69,13 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError + mib := []int32{CTLKern, KernProc, KernProcPathname, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + + return strings.Trim(string(buf), "\x00"), nil } func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go index d5b5bc32..6dbef4c2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go @@ -16,11 +16,12 @@ import ( "strconv" "strings" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/internal/common" "github.com/shirou/gopsutil/v3/net" - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" ) var pageSize = uint64(os.Getpagesize()) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go index 88e2bff5..7bd9b056 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go @@ -14,8 +14,9 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v3/internal/common" ) type Signal = syscall.Signal @@ -121,7 +122,7 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { if err == nil { return true, nil } - if err.Error() == "os: process already finished" { + if errors.Is(err, os.ErrProcessDone) { return false, nil } var errno syscall.Errno diff --git 
a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go index 982287d9..db4d4533 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go @@ -8,9 +8,8 @@ import ( "syscall" "unsafe" - "golang.org/x/sys/windows" - "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/windows" ) type PROCESS_MEMORY_COUNTERS struct { diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml new file mode 100644 index 00000000..dc6fefb9 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml @@ -0,0 +1,12 @@ +run: + timeout: 5m +linters: + enable: + - gofmt + - errcheck + - errname + - errorlint + - bodyclose + - durationcheck + - whitespace + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE similarity index 100% rename from vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE rename to vendor/github.com/shoenig/go-m1cpu/LICENSE diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile new file mode 100644 index 00000000..28d78639 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/Makefile @@ -0,0 +1,12 @@ +SHELL = bash + +default: test + +.PHONY: test +test: + @echo "--> Running Tests ..." + @go test -v -race ./... + +vet: + @echo "--> Vet Go sources ..." + @go vet ./... diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md new file mode 100644 index 00000000..399657ac --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/README.md @@ -0,0 +1,66 @@ +# m1cpu + +[![Go Reference](https://pkg.go.dev/badge/github.com/shoenig/go-m1cpu.svg)](https://pkg.go.dev/github.com/shoenig/go-m1cpu) +[![MPL License](https://img.shields.io/github/license/shoenig/go-m1cpu?color=g&style=flat-square)](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE) +[![Run CI Tests](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml/badge.svg)](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml) + +The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go. + +Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs. + +# Install + +```shell +go get github.com/shoenig/go-m1cpu@latest +``` + +# CGO + +This package requires the use of [CGO](https://go.dev/blog/cgo). + +Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc) +framework, which is accessible only through system C libraries. + +# Example + +Simple Go program to print Apple Silicon M1/M2 CPU speeds. + +```go +package main + +import ( + "fmt" + + "github.com/shoenig/go-m1cpu" +) + +func main() { + fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon()) + + fmt.Println("pCore GHz", m1cpu.PCoreGHz()) + fmt.Println("eCore GHz", m1cpu.ECoreGHz()) + + fmt.Println("pCore Hz", m1cpu.PCoreHz()) + fmt.Println("eCore Hz", m1cpu.ECoreHz()) +} +``` + +Using `go test` to print out available information. 
+ +``` +➜ go test -v -run Show +=== RUN Test_Show + cpu_test.go:42: pCore Hz 3504000000 + cpu_test.go:43: eCore Hz 2424000000 + cpu_test.go:44: pCore GHz 3.504 + cpu_test.go:45: eCore GHz 2.424 + cpu_test.go:46: pCore count 8 + cpu_test.go:47: eCoreCount 4 + cpu_test.go:50: pCore Caches 196608 131072 16777216 + cpu_test.go:53: eCore Caches 131072 65536 4194304 +--- PASS: Test_Show (0.00s) +``` + +# License + +Open source under the [MPL](LICENSE) diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go new file mode 100644 index 00000000..9e55eb45 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/cpu.go @@ -0,0 +1,213 @@ +//go:build darwin && arm64 && cgo + +package m1cpu + +// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit +// #include +// #include +// #include +// #include +// +// #ifndef MAC_OS_VERSION_12_0 +// #define kIOMainPortDefault kIOMasterPortDefault +// #endif +// +// #define HzToGHz(hz) ((hz) / 1000000000.0) +// +// UInt64 global_pCoreHz; +// UInt64 global_eCoreHz; +// int global_pCoreCount; +// int global_eCoreCount; +// int global_pCoreL1InstCacheSize; +// int global_eCoreL1InstCacheSize; +// int global_pCoreL1DataCacheSize; +// int global_eCoreL1DataCacheSize; +// int global_pCoreL2CacheSize; +// int global_eCoreL2CacheSize; +// char global_brand[32]; +// +// UInt64 getFrequency(CFTypeRef typeRef) { +// CFDataRef cfData = typeRef; +// +// CFIndex size = CFDataGetLength(cfData); +// UInt8 buf[size]; +// CFDataGetBytes(cfData, CFRangeMake(0, size), buf); +// +// UInt8 b1 = buf[size-5]; +// UInt8 b2 = buf[size-6]; +// UInt8 b3 = buf[size-7]; +// UInt8 b4 = buf[size-8]; +// +// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4)); +// return pCoreHz; +// } +// +// int sysctl_int(const char * name) { +// int value = -1; +// size_t size = 8; +// sysctlbyname(name, &value, &size, NULL, 0); +// return value; +// } +// +// void sysctl_string(const char * name, char * dest) { +// size_t size = 32; +// sysctlbyname(name, dest, &size, NULL, 0); +// } +// +// void initialize() { +// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu"); +// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu"); +// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize"); +// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize"); +// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize"); +// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize"); +// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize"); +// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize"); +// sysctl_string("machdep.cpu.brand_string", global_brand); +// +// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice"); +// io_iterator_t iter; +// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter); +// +// const size_t bufsize = 512; +// io_object_t obj; +// while ((obj = IOIteratorNext(iter))) { +// char class[bufsize]; +// IOObjectGetClass(obj, class); +// char name[bufsize]; +// IORegistryEntryGetName(obj, name); +// +// if (strncmp(name, "pmgr", bufsize) == 0) { +// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0); +// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0); +// +// long long pCoreHz = getFrequency(pCoreRef); +// long long eCoreHz = getFrequency(eCoreRef); +// +// global_pCoreHz = 
pCoreHz; +// global_eCoreHz = eCoreHz; +// return; +// } +// } +// } +// +// UInt64 eCoreHz() { +// return global_eCoreHz; +// } +// +// UInt64 pCoreHz() { +// return global_pCoreHz; +// } +// +// Float64 eCoreGHz() { +// return HzToGHz(global_eCoreHz); +// } +// +// Float64 pCoreGHz() { +// return HzToGHz(global_pCoreHz); +// } +// +// int pCoreCount() { +// return global_pCoreCount; +// } +// +// int eCoreCount() { +// return global_eCoreCount; +// } +// +// int pCoreL1InstCacheSize() { +// return global_pCoreL1InstCacheSize; +// } +// +// int pCoreL1DataCacheSize() { +// return global_pCoreL1DataCacheSize; +// } +// +// int pCoreL2CacheSize() { +// return global_pCoreL2CacheSize; +// } +// +// int eCoreL1InstCacheSize() { +// return global_eCoreL1InstCacheSize; +// } +// +// int eCoreL1DataCacheSize() { +// return global_eCoreL1DataCacheSize; +// } +// +// int eCoreL2CacheSize() { +// return global_eCoreL2CacheSize; +// } +// +// char * modelName() { +// return global_brand; +// } +import "C" + +func init() { + C.initialize() +} + +// IsAppleSilicon returns true on this platform. +func IsAppleSilicon() bool { + return true +} + +// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU. +func PCoreHz() uint64 { + return uint64(C.pCoreHz()) +} + +// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU. +func ECoreHz() uint64 { + return uint64(C.eCoreHz()) +} + +// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU. +func PCoreGHz() float64 { + return float64(C.pCoreGHz()) +} + +// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU. +func ECoreGHz() float64 { + return float64(C.eCoreGHz()) +} + +// PCoreCount returns the number of physical P (performance) cores. +func PCoreCount() int { + return int(C.pCoreCount()) +} + +// ECoreCount returns the number of physical E (efficiency) cores. +func ECoreCount() int { + return int(C.eCoreCount()) +} + +// PCoreCacheSize returns the sizes of the P (performance) core cache sizes +// in the order of +// +// - L1 instruction cache +// - L1 data cache +// - L2 cache +func PCoreCache() (int, int, int) { + return int(C.pCoreL1InstCacheSize()), + int(C.pCoreL1DataCacheSize()), + int(C.pCoreL2CacheSize()) +} + +// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes +// in the order of +// +// - L1 instruction cache +// - L1 data cache +// - L2 cache +func ECoreCache() (int, int, int) { + return int(C.eCoreL1InstCacheSize()), + int(C.eCoreL1DataCacheSize()), + int(C.eCoreL2CacheSize()) +} + +// ModelName returns the model name of the CPU. +func ModelName() string { + return C.GoString(C.modelName()) +} diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go new file mode 100644 index 00000000..d425025a --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/incompatible.go @@ -0,0 +1,53 @@ +//go:build !darwin || !arm64 || !cgo + +package m1cpu + +// IsAppleSilicon return false on this platform. 
+func IsAppleSilicon() bool { + return false +} + +// PCoreHZ requires darwin/arm64 +func PCoreHz() uint64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreHZ requires darwin/arm64 +func ECoreHz() uint64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// PCoreGHz requires darwin/arm64 +func PCoreGHz() float64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreGHz requires darwin/arm64 +func ECoreGHz() float64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// PCoreCount requires darwin/arm64 +func PCoreCount() int { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreCount requires darwin/arm64 +func ECoreCount() int { + panic("m1cpu: not a darwin/arm64 system") +} + +// PCoreCacheSize requires darwin/arm64 +func PCoreCache() (int, int, int) { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreCacheSize requires darwin/arm64 +func ECoreCache() (int, int, int) { + panic("m1cpu: not a darwin/arm64 system") +} + +// ModelName requires darwin/arm64 +func ModelName() string { + panic("m1cpu: not a darwin/arm64 system") +} diff --git a/vendor/github.com/shoenig/test/interfaces/interfaces.go b/vendor/github.com/shoenig/test/interfaces/interfaces.go index 9f4f26e8..60d726bb 100644 --- a/vendor/github.com/shoenig/test/interfaces/interfaces.go +++ b/vendor/github.com/shoenig/test/interfaces/interfaces.go @@ -6,11 +6,35 @@ import ( "github.com/shoenig/test/internal/constraints" ) +// MinFunc represents a type implementing the Min method. +type MinFunc[T any] interface { + Min() T +} + +// MaxFunc represents a type implementing the Max method. +type MaxFunc[T any] interface { + Max() T +} + // EqualFunc represents a type implementing the Equal method. type EqualFunc[A any] interface { Equal(A) bool } +// CopyFunc represents a type implementing the Copy method. +type CopyFunc[A any] interface { + Copy() A +} + +// CopyEqual represents a type satisfying both EqualFunc and CopyFunc. +type CopyEqual[T any] interface { + EqualFunc[T] + CopyFunc[T] +} + +// TweakFunc is used for modifying a value in tests. +type TweakFunc[E CopyEqual[E]] func(E) + // LessFunc represents any type implementing the Less method. 
type LessFunc[A any] interface { Less(A) bool diff --git a/vendor/github.com/shoenig/test/internal/assertions/assertions.go b/vendor/github.com/shoenig/test/internal/assertions/assertions.go index 3ded105f..fa24d0fc 100644 --- a/vendor/github.com/shoenig/test/internal/assertions/assertions.go +++ b/vendor/github.com/shoenig/test/internal/assertions/assertions.go @@ -264,7 +264,7 @@ func validJSON(input []byte) (s string) { return } -func EqSliceFunc[A any](exp, val []A, eq func(a, b A) bool) (s string) { +func EqSliceFunc[A, B any](exp []B, val []A, eq func(a A, b B) bool) (s string) { lenA, lenB := len(exp), len(val) if lenA != lenB { @@ -277,7 +277,7 @@ func EqSliceFunc[A any](exp, val []A, eq func(a, b A) bool) (s string) { miss := false for i := 0; i < lenA; i++ { - if !eq(exp[i], val[i]) { + if !eq(val[i], exp[i]) { miss = true break } @@ -303,7 +303,6 @@ func Equal[E interfaces.EqualFunc[E]](exp, val E) (s string) { func NotEqual[E interfaces.EqualFunc[E]](exp, val E) (s string) { if val.Equal(exp) { s = "expected inequality via .Equal method\n" - s += diff(exp, val, nil) } return } @@ -407,6 +406,14 @@ func SliceNotContains[A any](slice []A, item A, opts ...cmp.Option) (s string) { return } +func SliceNotContainsFunc[A, B any](slice []A, item B, eq func(a A, b B) bool) (s string) { + if containsFunc(slice, item, eq) { + s = "expected slice to not contain item but it does\n" + s += bullet("unwanted item %#v\n", item) + } + return +} + func SliceContainsAll[A any](slice, items []A) (s string) { if len(slice) != len(items) { s = "expected slice and items to contain same number of elements\n" @@ -535,6 +542,24 @@ func BetweenExclusive[O constraints.Ordered](lower, val, upper O) (s string) { return } +func Min[A any, C interfaces.MinFunc[A]](expect A, collection C, opts ...cmp.Option) (s string) { + min := collection.Min() + if !equal(expect, min, opts) { + s = "expected a different value for min\n" + s += diff(expect, min, opts) + } + return +} + +func Max[A any, C interfaces.MaxFunc[A]](expect A, collection C, opts ...cmp.Option) (s string) { + max := collection.Max() + if !equal(expect, max, opts) { + s = "expected a different value for max\n" + s += diff(expect, max, opts) + } + return +} + func Ascending[O constraints.Ordered](slice []O) (s string) { for i := 0; i < len(slice)-1; i++ { if slice[i] > slice[i+1] { @@ -559,6 +584,19 @@ func AscendingFunc[A any](slice []A, less func(a, b A) bool) (s string) { return } +func AscendingCmp[A any](slice []A, compare func(a, b A) int) (s string) { + for i := 0; i < len(slice)-1; i++ { + cmp := compare(slice[i], slice[i+1]) + if cmp > 0 { + s = fmt.Sprintf("expected compare(slice[%d], slice[%d]) <= 0\n", i, i+1) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + func AscendingLess[L interfaces.LessFunc[L]](slice []L) (s string) { for i := 0; i < len(slice)-1; i++ { if !slice[i].Less(slice[i+1]) { @@ -595,6 +633,19 @@ func DescendingFunc[A any](slice []A, less func(a, b A) bool) (s string) { return } +func DescendingCmp[A any](slice []A, compare func(a, b A) int) (s string) { + for i := 0; i < len(slice)-1; i++ { + cmp := compare(slice[i], slice[i+1]) + if cmp < 0 { + s = fmt.Sprintf("expected compare(slice[%d], slice[%d]) >= 0\n", i, i+1) + s += bullet("slice[%d]: %v\n", i, slice[i]) + s += bullet("slice[%d]: %v\n", i+1, slice[i+1]) + return + } + } + return +} + func DescendingLess[L interfaces.LessFunc[L]](slice []L) (s string) { for i := 0; i < len(slice)-1; i++ { 
if !(slice[i+1].Less(slice[i])) { @@ -1224,6 +1275,31 @@ func Wait(wc *wait.Constraint) (s string) { return } +type Tweak[E interfaces.CopyEqual[E]] struct { + Field string + Apply interfaces.TweakFunc[E] +} + +// StructEqual will apply each Tweak and assert E.Equal captures the modification. +func StructEqual[E interfaces.CopyEqual[E]](original E, tweaks []Tweak[E]) (s string) { + for _, tweak := range tweaks { + if tweak.Field == "" { + return "Tweak.Field must be set" + } else if tweak.Apply == nil { + return "Tweak.Apply must be set" + } + clone := original.Copy() + if s = Equal[E](original, clone); s != "" { + return + } + tweak.Apply(clone) + if s = NotEqual[E](original, clone); s != "" { + return + } + } + return +} + func bullet(msg string, args ...any) string { return fmt.Sprintf("↪ "+msg, args...) } diff --git a/vendor/github.com/shoenig/test/internal/util/slices.go b/vendor/github.com/shoenig/test/internal/util/slices.go new file mode 100644 index 00000000..4ea2563c --- /dev/null +++ b/vendor/github.com/shoenig/test/internal/util/slices.go @@ -0,0 +1,10 @@ +package util + +// CloneSliceFunc creates a copy of A by first applying convert to each element. +func CloneSliceFunc[A, B any](original []A, convert func(item A) B) []B { + clone := make([]B, len(original)) + for i := 0; i < len(original); i++ { + clone[i] = convert(original[i]) + } + return clone +} diff --git a/vendor/github.com/shoenig/test/must/must.go b/vendor/github.com/shoenig/test/must/must.go index bcf3f906..3fb40564 100644 --- a/vendor/github.com/shoenig/test/must/must.go +++ b/vendor/github.com/shoenig/test/must/must.go @@ -12,6 +12,7 @@ import ( "github.com/shoenig/test/internal/assertions" "github.com/shoenig/test/internal/brokenfs" "github.com/shoenig/test/internal/constraints" + "github.com/shoenig/test/internal/util" "github.com/shoenig/test/wait" ) @@ -147,8 +148,8 @@ func Lesser[L interfaces.LessFunc[L]](t T, exp, val L, settings ...Setting) { invoke(t, assertions.Lesser(exp, val), settings...) } -// SliceEqFunc asserts elements of exp and val are the same using eq. -func SliceEqFunc[A any](t T, exp, val []A, eq func(a, b A) bool, settings ...Setting) { +// SliceEqFunc asserts elements of val satisfy eq for the corresponding element in exp. +func SliceEqFunc[A, B any](t T, exp []B, val []A, eq func(expectation A, value B) bool, settings ...Setting) { t.Helper() invoke(t, assertions.EqSliceFunc(exp, val, eq), settings...) } @@ -217,6 +218,13 @@ func SliceNotContains[A any](t T, slice []A, item A, settings ...Setting) { invoke(t, assertions.SliceNotContains(slice, item), settings...) } +// SliceNotContainsFunc asserts item does not exist inslice, using eq to compare +// elements. +func SliceNotContainsFunc[A, B any](t T, slice []A, item B, eq func(a A, b B) bool, settings ...Setting) { + t.Helper() + invoke(t, assertions.SliceNotContainsFunc(slice, item, eq), settings...) +} + // SliceContainsAll asserts slice and items contain the same elements, but in // no particular order. The number of elements in slice and items must be the // same. @@ -310,37 +318,65 @@ func BetweenExclusive[O constraints.Ordered](t T, lower, val, upper O, settings invoke(t, assertions.BetweenExclusive(lower, val, upper), settings...) } -// Ascending asserts slice[n] ≤ slice[n+1] for each element n. +// Min asserts collection.Min() is equal to expect. +// +// The equality method may be configured with Cmp options. 
+func Min[A any, C interfaces.MinFunc[A]](t T, expect A, collection C, settings ...Setting) { + t.Helper() + invoke(t, assertions.Min(expect, collection, options(settings...)...), settings...) +} + +// Max asserts collection.Max() is equal to expect. +// +// The equality method may be configured with Cmp options. +func Max[A any, C interfaces.MaxFunc[A]](t T, expect A, collection C, settings ...Setting) { + t.Helper() + invoke(t, assertions.Max(expect, collection, options(settings...)...), settings...) +} + +// Ascending asserts slice[n] ≤ slice[n+1] for each element. func Ascending[O constraints.Ordered](t T, slice []O, settings ...Setting) { t.Helper() invoke(t, assertions.Ascending(slice), settings...) } -// AscendingFunc asserts slice[n] is less than slice[n+1] for each element n using the less comparator. +// AscendingFunc asserts slice[n] is less than slice[n+1] for each element using the less comparator. func AscendingFunc[A any](t T, slice []A, less func(A, A) bool, settings ...Setting) { t.Helper() invoke(t, assertions.AscendingFunc(slice, less), settings...) } -// AscendingLess asserts slice[n].Less(slice[n+1]) for each element n. +// AscendingCmp asserts slice[n] is less than slice[n+1] for each element using the cmp comparator. +func AscendingCmp[A any](t T, slice []A, compare func(A, A) int, settings ...Setting) { + t.Helper() + invoke(t, assertions.AscendingCmp(slice, compare), settings...) +} + +// AscendingLess asserts slice[n].Less(slice[n+1]) for each element. func AscendingLess[L interfaces.LessFunc[L]](t T, slice []L, settings ...Setting) { t.Helper() invoke(t, assertions.AscendingLess(slice), settings...) } -// Descending asserts slice[n] ≥ slice[n+1] for each element n. +// Descending asserts slice[n] ≥ slice[n+1] for each element. func Descending[O constraints.Ordered](t T, slice []O, settings ...Setting) { t.Helper() invoke(t, assertions.Descending(slice), settings...) } -// DescendingFunc asserts slice[n+1] is less than slice[n] for each element n using the less comparator. +// DescendingFunc asserts slice[n+1] is less than slice[n] for each element using the less comparator. func DescendingFunc[A any](t T, slice []A, less func(A, A) bool, settings ...Setting) { t.Helper() invoke(t, assertions.DescendingFunc(slice, less), settings...) } -// DescendingLess asserts slice[n+1].Less(slice[n]) for each element n. +// DescendingCmp asserts slice[n+1] is ≤ slice[n] for each element. +func DescendingCmp[A any](t T, slice []A, compare func(A, A) int, settings ...Setting) { + t.Helper() + invoke(t, assertions.DescendingCmp(slice, compare), settings...) +} + +// DescendingLess asserts slice[n+1].Less(slice[n]) for each element. func DescendingLess[L interfaces.LessFunc[L]](t T, slice []L, settings ...Setting) { t.Helper() invoke(t, assertions.DescendingLess(slice), settings...) @@ -715,3 +751,30 @@ func Wait(t T, wc *wait.Constraint, settings ...Setting) { t.Helper() invoke(t, assertions.Wait(wc), settings...) } + +// Tweak is used to modify a struct and assert its Equal method captures the +// modification. +// +// Field is the name of the struct field and is used only for error printing. +// Apply is a function that modifies E. +type Tweak[E interfaces.CopyEqual[E]] struct { + Field string + Apply interfaces.TweakFunc[E] +} + +// Tweaks is a slice of Tweak. +type Tweaks[E interfaces.CopyEqual[E]] []Tweak[E] + +// StructEqual will apply each Tweak and assert E.Equal captures the modification. 
+func StructEqual[E interfaces.CopyEqual[E]](t T, original E, tweaks Tweaks[E], settings ...Setting) { + t.Helper() + invoke(t, assertions.StructEqual( + original, + util.CloneSliceFunc( + tweaks, + func(tweak Tweak[E]) assertions.Tweak[E] { + return assertions.Tweak[E]{Field: tweak.Field, Apply: tweak.Apply} + }, + ), + ), settings...) +} diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore deleted file mode 100644 index 9fa948eb..00000000 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -*.prof -*.test -*.swp -/bin/ -cover.out -cover-*.out -/.idea -*.iml -/cmd/bbolt/bbolt - diff --git a/vendor/go.etcd.io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE deleted file mode 100644 index 004e77fe..00000000 --- a/vendor/go.etcd.io/bbolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile deleted file mode 100644 index 18154c63..00000000 --- a/vendor/go.etcd.io/bbolt/Makefile +++ /dev/null @@ -1,63 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -TESTFLAGS_RACE=-race=false -ifdef ENABLE_RACE - TESTFLAGS_RACE=-race=true -endif - -TESTFLAGS_CPU= -ifdef CPU - TESTFLAGS_CPU=-cpu=$(CPU) -endif -TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) - -.PHONY: fmt -fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') - -.PHONY: lint -lint: - golangci-lint run ./... - -.PHONY: test -test: - @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt - - @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt - -.PHONY: coverage -coverage: - @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ - -coverprofile cover-freelist-hashmap.out -covermode atomic - - @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ - -coverprofile cover-freelist-array.out -covermode atomic - -.PHONY: gofail-enable -gofail-enable: install-gofail - gofail enable . - -.PHONY: gofail-disable -gofail-disable: - gofail disable . 
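The `must.StructEqual`, `Tweak`/`Tweaks`, and the new `Cmp`-style ordering assertions added to `shoenig/test` in the hunks above are described only by their doc comments in this patch. The sketch below is editorial and not part of the diff: the `Endpoint` type, its `Copy`/`Equal` methods, and the test are hypothetical, and it assumes the `interfaces.CopyEqual`/`interfaces.TweakFunc` constraints are satisfied by pointer receivers, as the assertion code above implies.

```go
// endpoint_example_test.go (hypothetical file, for illustration only)
package example

import (
	"testing"

	"github.com/shoenig/test/must"
)

// Endpoint is a hypothetical struct providing the Copy/Equal methods
// required by the interfaces.CopyEqual constraint used by must.StructEqual.
type Endpoint struct {
	Host string
	Port int
}

func (e *Endpoint) Copy() *Endpoint {
	c := *e
	return &c
}

func (e *Endpoint) Equal(o *Endpoint) bool {
	return o != nil && e.Host == o.Host && e.Port == o.Port
}

func TestEndpoint_Equal(t *testing.T) {
	// StructEqual copies the original, applies each Tweak, and asserts
	// that Equal reports the tweaked clone as different from the original.
	must.StructEqual(t, &Endpoint{Host: "localhost", Port: 4646}, must.Tweaks[*Endpoint]{
		{Field: "Host", Apply: func(e *Endpoint) { e.Host = "example.com" }},
		{Field: "Port", Apply: func(e *Endpoint) { e.Port = 8080 }},
	})

	// AscendingCmp is one of the new cmp-based ordering assertions.
	must.AscendingCmp(t, []int{1, 2, 3}, func(a, b int) int { return a - b })
}
```

In this pattern, adding a field to `Endpoint` but forgetting to compare it in `Equal` is caught by adding a corresponding `Tweak`, which is the use case the `StructEqual` doc comment describes.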
- -.PHONY: install-gofail -install-gofail: - go install go.etcd.io/gofail - -.PHONY: test-failpoint -test-failpoint: - @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - - @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md deleted file mode 100644 index 2be669a6..00000000 --- a/vendor/go.etcd.io/bbolt/README.md +++ /dev/null @@ -1,967 +0,0 @@ -bbolt -===== - -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) -[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) -[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) - -bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value -store. The purpose of this fork is to provide the Go community with an active -maintenance and development target for Bolt; the goal is improved reliability -and stability. bbolt includes bug fixes, performance enhancements, and features -not found in Bolt while preserving backwards compatibility with the Bolt API. - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[gh_ben]: https://github.com/benbjohnson -[bolt]: https://github.com/boltdb/bolt -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: https://www.symas.com/symas-embedded-database-lmdb - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. - -## Project versioning - -bbolt uses [semantic versioning](http://semver.org). -API should not change between patch and minor releases. -New minor versions may add additional features to the API. 
- -## Table of Contents - - - [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) - - [Resources](#resources) - - [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) - - [Caveats & Limitations](#caveats--limitations) - - [Reading the Source](#reading-the-source) - - [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: -```sh -$ go get go.etcd.io/bbolt@latest -``` - -This will retrieve the library and update your `go.mod` and `go.sum` files. - -To run the command line utility, execute: -```sh -$ go run go.etcd.io/bbolt/cmd/bbolt@latest -``` - -Run `go install` to install the `bbolt` command line utility into -your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the -`GOPATH` environment variable is not set. -```sh -$ go install go.etcd.io/bbolt/cmd/bbolt@latest -``` - -### Importing bbolt - -To use bbolt as an embedded key-value store, import as: - -```go -import bolt "go.etcd.io/bbolt" - -db, err := bolt.Open(path, 0666, nil) -if err != nil { - return err -} -defer db.Close() -``` - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - bolt "go.etcd.io/bbolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. 
To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating transaction from the `DB` is thread safe. - -Transactions should not depend on one another and generally shouldn't be opened -simultaneously in the same goroutine. This can cause a deadlock as the read-write -transaction needs to periodically re-map the data file but it cannot do so while -any read-only transaction is open. Even a nested read-only transaction can cause -a deadlock, as the child transaction can block the parent transaction from releasing -its resources. - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also rollback the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Println("Allocated ID %d", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... 
-_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. - id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. 
To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - -Say you had a multi-tenant application where the root level bucket was the account bucket. 
Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. - -```go - -// createUser creates a new user in the given account. -func createUser(accountID int, u *User) error { - // Start the transaction. - tx, err := db.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Retrieve the root bucket for the account. - // Assume this has already been created when the account was set up. - root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) - - // Setup the users bucket. - bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) - if err != nil { - return err - } - - // Generate an ID for the new user. - userID, err := bkt.NextSequence() - if err != nil { - return err - } - u.ID = userID - - // Marshal and save the encoded user. - if buf, err := json.Marshal(u); err != nil { - return err - } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { - return err - } - - // Commit the transaction. - if err := tx.Commit(); err != nil { - return err - } - - return nil -} - -``` - - - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. - -By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) -documentation for information about optimizing for larger-than-RAM datasets. - -One common use case is to backup over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can backup using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to backup to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. 
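The Statistics section above ends by suggesting an HTTP endpoint that performs a fixed-length sample, but the deleted README only shows the logging goroutine. As a rough illustration (not part of this patch), such a handler might look like the sketch below; the `db` variable and the handler name are assumptions, and it reuses the same `Stats`/`Sub` calls as the goroutine example.

```go
package main

import (
	"encoding/json"
	"net/http"
	"time"

	bolt "go.etcd.io/bbolt"
)

var db *bolt.DB // opened elsewhere, e.g. via bolt.Open as shown earlier

// statsSampleHandler reports how the database statistics changed over a
// fixed 10-second window, returned as a JSON diff of two Stats snapshots.
func statsSampleHandler(w http.ResponseWriter, req *http.Request) {
	prev := db.Stats()
	time.Sleep(10 * time.Second) // fixed-length sample window
	cur := db.Stats()
	diff := cur.Sub(&prev)

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(diff); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```

Blocking the request for the sample window keeps the sketch short; a production service would more likely sample in a background goroutine and serve the most recent diff.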
- - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. To this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - -### Mobile Use (iOS/Android) - -Bolt is able to run on mobile devices by leveraging the binding feature of the -[gomobile](https://github.com/golang/mobile) tool. Create a struct that will -contain your database logic and a reference to a `*bolt.DB` with a initializing -constructor that takes in a filepath where the database file will be stored. -Neither Android nor iOS require extra permissions or cleanup from using this method. - -```go -func NewBoltDB(filepath string) *BoltDB { - db, err := bolt.Open(filepath+"/demo.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - - return &BoltDB{db} -} - -type BoltDB struct { - db *bolt.DB - ... -} - -func (b *BoltDB) Path() string { - return b.db.Path() -} - -func (b *BoltDB) Close() { - b.db.Close() -} -``` - -Database logic should be defined as methods on this wrapper struct. - -To initialize this struct from the native language (both platforms now sync -their local storage to the cloud. These snippets disable that functionality for the -database file): - -#### Android - -```java -String path; -if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ - path = getNoBackupFilesDir().getAbsolutePath(); -} else{ - path = getFilesDir().getAbsolutePath(); -} -Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) -``` - -#### iOS - -```objc -- (void)demo { - NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, - NSUserDomainMask, - YES) objectAtIndex:0]; - GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); - [self addSkipBackupAttributeToItemAtPath:demo.path]; - //Some DB Logic would go here - [demo close]; -} - -- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString -{ - NSURL* URL= [NSURL fileURLWithPath: filePathString]; - assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); - - NSError *error = nil; - BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] - forKey: NSURLIsExcludedFromBackupKey error: &error]; - if(!success){ - NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); - } - return success; -} - -``` - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. 
- -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application, however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/values pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can use `DB.Batch()` or add a - write-ahead log to help mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Bolt uses an exclusive write lock on the database file so it cannot be - shared by multiple processes. - -* Be careful when using `Bucket.FillPercent`. 
Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets causes poor page utilization - once they become larger than the page size (typically 4KB). - -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM, provided its - memory-map fits in the process virtual address space. It may be problematic - on 32-bits systems. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. - -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Reading the Source - -Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, -transactional key/value database so it can be a good starting point for people -interested in how databases work. - -The best places to start are the main entry points into Bolt: - -- `Open()` - Initializes the reference to the database. It's responsible for - creating the database if it doesn't exist, obtaining an exclusive lock on the - file, reading the meta pages, & memory-mapping the file. - -- `DB.Begin()` - Starts a read-only or read-write transaction depending on the - value of the `writable` argument. This requires briefly obtaining the "meta" - lock to keep track of open transactions. Only one read-write transaction can - exist at a time so the "rwlock" is acquired during the life of a read-write - transaction. - -- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the - arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket - materializes the underlying page and the page's parent pages into memory as - "nodes". These nodes are where mutations occur during read-write transactions. - These changes get flushed to disk during commit. - -- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor - to move to the page & position of a key/value pair. 
During a read-only - transaction, the key and value data is returned as a direct reference to the - underlying mmap file so there's no allocation overhead. For read-write - transactions, this data may reference the mmap file or one of the in-memory - node values. - -- `Cursor` - This object is simply for traversing the B+tree of on-disk pages - or in-memory nodes. It can seek to a specific key, move to the first or last - value, or it can move forward or backward. The cursor handles the movement up - and down the B+tree transparently to the end user. - -- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages - into pages to be written to disk. Writing to disk then occurs in two phases. - First, the dirty pages are written to disk and an `fsync()` occurs. Second, a - new meta page with an incremented transaction ID is written and another - `fsync()` occurs. This two phase write ensures that partially written data - pages are ignored in the event of a crash since the meta page pointing to them - is never written. Partially written meta pages are invalidated because they - are written with a checksum. - -If you have additional notes that could be helpful for others, please submit -them via pull request. - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. -* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. -* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. -* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. -* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. 
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. -* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. -* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains -* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. -* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. -* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. 
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. - -If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go deleted file mode 100644 index aee25960..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go deleted file mode 100644 index 5dd8f3f2..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go deleted file mode 100644 index aee25960..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go deleted file mode 100644 index 447bc197..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build arm64 -// +build arm64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go deleted file mode 100644 index 7707bcac..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bbolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. 
-func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/go.etcd.io/bbolt/bolt_loong64.go b/vendor/go.etcd.io/bbolt/bolt_loong64.go deleted file mode 100644 index 31c17c1d..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_loong64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build loong64 -// +build loong64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go deleted file mode 100644 index a9385beb..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build mips64 || mips64le -// +build mips64 mips64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go deleted file mode 100644 index ed734ff7..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build mips || mipsle -// +build mips mipsle - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x40000000 // 1GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go deleted file mode 100644 index d7f50358..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bbolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go deleted file mode 100644 index e403f57d..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build ppc -// +build ppc - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go deleted file mode 100644 index fcd86529..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build ppc64 -// +build ppc64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go deleted file mode 100644 index 20234aca..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build ppc64le -// +build ppc64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go deleted file mode 100644 index 060f30c7..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build riscv64 -// +build riscv64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go deleted file mode 100644 index 92d2755a..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build s390x -// +build s390x - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go deleted file mode 100644 index 757ae4d1..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ /dev/null @@ -1,87 +0,0 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix - -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - flag := syscall.LOCK_NB - if exclusive { - flag |= syscall.LOCK_EX - } else { - flag |= syscall.LOCK_SH - } - for { - // Attempt to obtain an exclusive lock. - err := syscall.Flock(int(fd), flag) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - err = unix.Madvise(b, syscall.MADV_RANDOM) - if err != nil && err != syscall.ENOSYS { - // Ignore not implemented error in kernel because it still works. - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. 
-func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go deleted file mode 100644 index 6dea4294..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build aix -// +build aix - -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } - for { - // Attempt to obtain an exclusive lock. - lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go deleted file mode 100644 index babad657..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go +++ /dev/null @@ -1,88 +0,0 @@ -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } - for { - // Attempt to obtain an exclusive lock. - lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // If we timed out then return an error. 
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go deleted file mode 100644 index e5dde274..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ /dev/null @@ -1,117 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/windows" -) - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY - if exclusive { - flags |= windows.LOCKFILE_EXCLUSIVE_LOCK - } - for { - // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range - // -1..0 as the lock on the database file. - var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 - err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{ - Offset: m1, - OffsetHigh: m1, - }) - - if err == nil { - return nil - } else if err != windows.ERROR_LOCK_VIOLATION { - return err - } - - // If we timed oumercit then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 - return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{ - Offset: m1, - OffsetHigh: m1, - }) -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - var sizelo, sizehi uint32 - - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff - } - - // Open a file mapping handle. 
- h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0) - if addr == 0 { - // Do our best and report error returned from MapViewOfFile. - _ = syscall.CloseHandle(h) - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - var err1 error - if err := syscall.UnmapViewOfFile(addr); err != nil { - err1 = os.NewSyscallError("UnmapViewOfFile", err) - } - db.data = nil - db.datasz = 0 - return err1 -} diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go deleted file mode 100644 index 81e09a53..00000000 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows && !plan9 && !linux && !openbsd -// +build !windows,!plan9,!linux,!openbsd - -package bbolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go deleted file mode 100644 index 054467af..00000000 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ /dev/null @@ -1,791 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. 
-func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.IncCursorCount(1) - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // Unaligned access requires a copy to be made. - const unalignedMask = unsafe.Alignof(struct { - bucket - page - }{}) - 1 - unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } - return nil, ErrIncompatibleValue - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. 
- key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. -// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. 
- key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return nil if the key doesn't exist. - if !bytes.Equal(key, k) { - return nil - } - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Set the sequence. - b.bucket.sequence = v - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { - if err := fn(k); err != nil { - return err - } - } - } - return nil -} - -// Stats returns stats on a bucket. -func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) - - // Add all element key, value sizes. 
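The CreateBucket/Put/Get/Delete contracts documented above (blank keys rejected, MaxKeySize/MaxValueSize limits, nil returned for missing or bucket-valued keys, deleting a missing key is a no-op) are easiest to see from the caller's side. A hedged usage sketch against the public API; the bucket and key names are invented and not part of this patch.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/buckets.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("users"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("alice"), []byte(`{"role":"admin"}`)); err != nil {
			return err
		}
		id, err := b.NextSequence() // autoincrementing counter stored in the bucket header
		if err != nil {
			return err
		}
		fmt.Printf("alice=%s seq=%d\n", b.Get([]byte("alice")), id)
		return b.Delete([]byte("bob")) // deleting a missing key is a no-op
	})
	if err != nil {
		log.Fatal(err)
	}
}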
- // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += int(used) - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += uintptr(lastElement.pos + lastElement.ksize) - s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = depth + 1 - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0, []pgid{b.root}) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgId) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. 
Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() uintptr { - return uintptr(b.tx.db.pageSize / 4) -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgId]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgId) - } - - // Read the page into the node and cache it. 
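To put the inlining threshold above in concrete terms: maxInlineBucketSize is a quarter of the page size, so with the typical 4KB page a bucket stays inline only while its single leaf node (page header plus element headers, keys and values) fits in roughly 1KB and it holds no sub-buckets; a single oversized value or a nested bucket forces it out onto its own root page at the next spill.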
- n.read(p) - b.nodes[pgId] = n - - // Update statistics. - b.tx.stats.IncNodeCount(1) - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. - BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go deleted file mode 100644 index 5f1d4c3b..00000000 --- a/vendor/go.etcd.io/bbolt/compact.go +++ /dev/null @@ -1,119 +0,0 @@ -package bbolt - -// Compact will create a copy of the source DB and in the destination DB. This may -// reclaim space that the source database no longer has use for. txMaxSize can be -// used to limit the transactions size of this process and may trigger intermittent -// commits. 
A value of zero will ignore transaction sizes. -// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349 -func Compact(dst, src *DB, txMaxSize int64) error { - // commit regularly, or we'll run out of memory for large datasets if using one transaction. - var size int64 - tx, err := dst.Begin(true) - if err != nil { - return err - } - defer func() { - if tempErr := tx.Rollback(); tempErr != nil { - err = tempErr - } - }() - - if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { - // On each key/value, check if we have exceeded tx size. - sz := int64(len(k) + len(v)) - if size+sz > txMaxSize && txMaxSize != 0 { - // Commit previous transaction. - if err := tx.Commit(); err != nil { - return err - } - - // Start new transaction. - tx, err = dst.Begin(true) - if err != nil { - return err - } - size = 0 - } - size += sz - - // Create bucket on the root transaction if this is the first level. - nk := len(keys) - if nk == 0 { - bkt, err := tx.CreateBucket(k) - if err != nil { - return err - } - if err := bkt.SetSequence(seq); err != nil { - return err - } - return nil - } - - // Create buckets on subsequent levels, if necessary. - b := tx.Bucket(keys[0]) - if nk > 1 { - for _, k := range keys[1:] { - b = b.Bucket(k) - } - } - - // Fill the entire page for best compaction. - b.FillPercent = 1.0 - - // If there is no value then this is a bucket call. - if v == nil { - bkt, err := b.CreateBucket(k) - if err != nil { - return err - } - if err := bkt.SetSequence(seq); err != nil { - return err - } - return nil - } - - // Otherwise treat it as a key/value pair. - return b.Put(k, v) - }); err != nil { - return err - } - err = tx.Commit() - - return err -} - -// walkFunc is the type of the function called for keys (buckets and "normal" -// values) discovered by Walk. keys is the list of keys to descend to the bucket -// owning the discovered key/value pair k/v. -type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error - -// walk walks recursively the bolt database db, calling walkFn for each key it finds. -func walk(db *DB, walkFn walkFunc) error { - return db.View(func(tx *Tx) error { - return tx.ForEach(func(name []byte, b *Bucket) error { - return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) - }) - }) -} - -func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { - // Execute callback. - if err := fn(keypath, k, v, seq); err != nil { - return err - } - - // If this is not a bucket then stop. - if v != nil { - return nil - } - - // Iterate over each child key/value. - keypath = append(keypath, k) - return b.ForEach(func(k, v []byte) error { - if v == nil { - bkt := b.Bucket(k) - return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) - } - return walkBucket(b, keypath, k, v, b.Sequence(), fn) - }) -} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go deleted file mode 100644 index 5dafb0ca..00000000 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ /dev/null @@ -1,420 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket -// in lexicographical order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. 
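Before the cursor code that follows, a usage sketch for the Compact helper removed above. It is illustrative only and not part of the patch; the file names are invented, and 1<<20 simply caps each copy transaction at roughly 1MB of key/value data, per the txMaxSize description.

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	src, err := bolt.Open("/tmp/old.db", 0o600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := bolt.Open("/tmp/compacted.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Copy every bucket and key, committing intermittently so the copy never
	// holds one huge write transaction open on the destination.
	if err := bolt.Compact(dst, src, 1<<20); err != nil {
		log.Fatal(err)
	}
}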
-// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -func (c *Cursor) first() (key []byte, value []byte, flags uint32) { - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.goToFirstElementOnTheStack() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil, flags - } - return k, v, flags -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - - // If this is an empty page (calling Delete may result in empty pages) - // we call prev to find the last page that is not empty - for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 { - c.prev() - } - - if len(c.stack) == 0 { - return nil, nil - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key using a b-tree search and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. 
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) goToFirstElementOnTheStack() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgId pgid - if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid - } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgId) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgId pgid - if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid - } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgId) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.goToFirstElementOnTheStack() - - // If this is an empty page then restart and move back up the stack. 
- // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil, 0 - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - return c.keyValue() -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { - p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. 
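The cursor contract spelled out above (lexicographic order, nil values for nested buckets, Seek landing on the first key at or after the target) maps onto the usual iteration patterns. A sketch only, not part of the patch; the bucket name and prefix are invented.

package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/scan.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("users"))
		if b == nil {
			return nil // bucket has not been created yet
		}
		c := b.Cursor()

		// Full scan in lexicographic key order.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s -> %s\n", k, v)
		}

		// Prefix scan: Seek positions the cursor on the first key >= the prefix.
		prefix := []byte("al")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("prefix hit: %s -> %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}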
- if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(ref.index) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go deleted file mode 100644 index c9422127..00000000 --- a/vendor/go.etcd.io/bbolt/db.go +++ /dev/null @@ -1,1361 +0,0 @@ -package bbolt - -import ( - "errors" - "fmt" - "hash/fnv" - "io" - "os" - "runtime" - "sort" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// The time elapsed between consecutive file locking attempts. -const flockRetryTimeout = 50 * time.Millisecond - -// FreelistType is the type of the freelist backend -type FreelistType string - -const ( - // FreelistArrayType indicates backend freelist type is array - FreelistArrayType = FreelistType("array") - // FreelistMapType indicates backend freelist type is hashmap - FreelistMapType = FreelistType("hashmap") -) - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. 
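The freelist constants and the DB doc comment above translate into the following caller-side sketch (hedged, not part of the patch): the hashmap freelist backend is selected through Options, and a handle that is not open refuses transactions with ErrDatabaseNotOpen rather than panicking.

package main

import (
	"errors"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/meta.db", 0o600, &bolt.Options{
		FreelistType: bolt.FreelistMapType, // hashmap-backed freelist instead of the array default
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// All access goes through transactions, and a closed handle refuses them.
	err = db.View(func(tx *bolt.Tx) error { return nil })
	fmt.Println(errors.Is(err, bolt.ErrDatabaseNotOpen)) // true
}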
-type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips syncing freelist to disk. This improves the database - // write performance under normal operation, but requires a full database - // re-sync during recovery. - NoFreelistSync bool - - // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and fragmentation in freelist is common. - // The alternative one is using hashmap, it is faster in almost all circumstances - // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. - // The default type is array - FreelistType FreelistType - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // When `true`, bbolt will always load the free pages when opening the DB. - // When opening db in write mode, this flag will always automatically - // set to `true`. - PreLoadFreelist bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - // Mlock locks database file in memory when set to true. - // It prevents major page faults, however used memory can't be reclaimed. - // - // Supported only on Unix via mlock/munlock syscalls. - Mlock bool - - path string - openFile func(string, int, os.FileMode) (*os.File, error) - file *os.File - // `dataref` isn't used at all on Windows, and the golangci-lint - // always fails on Windows platform. 
- //nolint - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - stats Stats - - freelist *freelist - freelistLoad sync.Once - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ - opened: true, - } - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoSync = options.NoSync - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - db.NoFreelistSync = options.NoFreelistSync - db.PreLoadFreelist = options.PreLoadFreelist - db.FreelistType = options.FreelistType - db.Mlock = options.Mlock - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } else { - // always load free pages in write mode - db.PreLoadFreelist = true - } - - db.openFile = options.OpenFile - if db.openFile == nil { - db.openFile = os.OpenFile - } - - // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - db.path = db.file.Name() - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - if db.pageSize = options.PageSize; db.pageSize == 0 { - // Set the default page size to the OS page size. - db.pageSize = defaultPageSize - } - - // Initialize the database if it doesn't exist. 
- if info, err := db.file.Stat(); err != nil { - _ = db.close() - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - // clean up file descriptor on initialization fail - _ = db.close() - return nil, err - } - } else { - // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { - _ = db.close() - return nil, ErrInvalid - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - if db.PreLoadFreelist { - db.loadFreelist() - } - - if db.readOnly { - return db, nil - } - - // Flush freelist when transitioning from no sync to sync so - // NoFreelistSync unaware boltdb can open the db later. - if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) - if tx != nil { - err = tx.Commit() - } - if err != nil { - _ = db.close() - return nil, err - } - } - - // Mark the database as opened and return. - return db, nil -} - -// getPageSize reads the pageSize from the meta pages. It tries -// to read the first meta page firstly. If the first page is invalid, -// then it tries to read the second page using the default page size. -func (db *DB) getPageSize() (int, error) { - var ( - meta0CanRead, meta1CanRead bool - ) - - // Read the first meta page to determine the page size. - if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil { - // We cannot read the page size from page 0, but can read page 0. - meta0CanRead = canRead - } else { - return pgSize, nil - } - - // Read the second meta page to determine the page size. - if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil { - // We cannot read the page size from page 1, but can read page 1. - meta1CanRead = canRead - } else { - return pgSize, nil - } - - // If we can't read the page size from both pages, but can read - // either page, then we assume it's the same as the OS or the one - // given, since that's how the page size was chosen in the first place. - // - // If both pages are invalid, and (this OS uses a different page size - // from what the database was created with or the given page size is - // different from what the database was created with), then we are out - // of luck and cannot access the database. - if meta0CanRead || meta1CanRead { - return db.pageSize, nil - } - - return 0, ErrInvalid -} - -// getPageSizeFromFirstMeta reads the pageSize from the first meta page -func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { - var buf [0x1000]byte - var metaCanRead bool - if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { - metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil - } - } - return 0, metaCanRead, ErrInvalid -} - -// getPageSizeFromSecondMeta reads the pageSize from the second meta page -func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { - var ( - fileSize int64 - metaCanRead bool - ) - - // get the db file size - if info, err := db.file.Stat(); err != nil { - return 0, metaCanRead, err - } else { - fileSize = info.Size() - } - - // We need to read the second meta page, so we should skip the first page; - // but we don't know the exact page size yet, it's chicken & egg problem. 
- // The solution is to try all the possible page sizes, which starts from 1KB - // and until 16MB (1024<<14) or the end of the db file - // - // TODO: should we support larger page size? - for i := 0; i <= 14; i++ { - var buf [0x1000]byte - var pos int64 = 1024 << uint(i) - if pos >= fileSize-1024 { - break - } - bw, err := db.file.ReadAt(buf[:], pos) - if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { - metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil - } - } - } - - return 0, metaCanRead, ErrInvalid -} - -// loadFreelist reads the freelist if it is synced, or reconstructs it -// by scanning the DB if it is not synced. It assumes there are no -// concurrent accesses being made to the freelist. -func (db *DB) loadFreelist() { - db.freelistLoad.Do(func() { - db.freelist = newFreelist(db.FreelistType) - if !db.hasSyncedFreelist() { - // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) - } else { - // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) - } - db.stats.FreePageN = db.freelist.free_count() - }) -} - -func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - fileSize := int(info.Size()) - var size = fileSize - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - if db.Mlock { - // Unlock db memory - if err := db.munlock(fileSize); err != nil { - return err - } - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. - if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - // gofail: var mapError string - // return errors.New(mapError) - if err := mmap(db, size); err != nil { - return err - } - - if db.Mlock { - // Don't allow swapping of data file - if err := db.mlock(fileSize); err != nil { - return err - } - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -func (db *DB) invalidate() { - db.dataref = nil - db.data = nil - db.datasz = 0 - - db.meta0 = nil - db.meta1 = nil -} - -// munmap unmaps the data file from memory. -func (db *DB) munmap() error { - defer db.invalidate() - - // gofail: var unmapError string - // return errors.New(unmapError) - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. 
The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. -func (db *DB) mmapSize(size int) (int, error) { - // Double the size from 32KB until 1GB. - for i := uint(15); i <= 30; i++ { - if size <= 1<<i { - return 1 << i, nil - } - } - - // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") - } - - // If larger than 1GB then grow by 1GB at a time. - sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder - } - - // Ensure that the mmap size is a multiple of the page size. - // This should always be true since we're incrementing in MBs. - pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -func (db *DB) munlock(fileSize int) error { - if err := munlock(db, fileSize); err != nil { - return fmt.Errorf("munlock error: " + err.Error()) - } - return nil -} - -func (db *DB) mlock(fileSize int) error { - if err := mlock(db, fileSize); err != nil { - return fmt.Errorf("mlock error: " + err.Error()) - } - return nil -} - -func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error { - if err := db.munlock(fileSizeFrom); err != nil { - return err - } - if err := db.mlock(fileSizeTo); err != nil { - return err - } - return nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Create two meta pages on a buffer. - buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - db.filesz = len(buf) - - return nil -} - -// Close releases all database resources. -// It will block waiting for any open transactions to finish -// before closing the database and returning. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - return fmt.Errorf("bolt.Close(): funlock error: %w", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction.
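To make the growth rule above concrete: a requested mmap of 40KB rounds up to the next power of two, 64KB; requests keep doubling that way up to 1GB; a 1.5GB request falls out of the doubling loop and is instead rounded up to the next 1GB step, 2GB, which is already a page-size multiple and well under maxMapSize (roughly 256TB on 64-bit builds).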
-// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. -// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Exit if the database is not correctly mapped. - if db.data == nil { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrInvalidMapping - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Exit if the database is not correctly mapped. - if db.data == nil { - db.rwlock.Unlock() - return nil, ErrInvalidMapping - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - db.freePages() - return t, nil -} - -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. 
- for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Rollback() -} - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. 
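For context, the transaction model documented above (a single writer, any number of readers, managed transactions via Update and View) is easiest to see in a short usage sketch. The snippet below is illustrative only and assumes bbolt's usual public API: Open and the bucket helpers (CreateBucketIfNotExists, Put, Get) are defined elsewhere in the package, not in the lines removed here.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil) // creates the file if needed
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update runs fn inside the single read-write transaction; returning an
	// error rolls the whole transaction back.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// View runs fn inside a read-only transaction; the returned byte slices
	// are only valid until the transaction ends.
	if err := db.View(func(tx *bolt.Tx) error {
		fmt.Printf("answer = %s\n", tx.Bucket([]byte("widgets")).Get([]byte("answer")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}

Per the Begin documentation above, a read and a write transaction should not be opened in the same goroutine, and read-only transactions must always be closed (View does that automatically).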
-func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - c.err <- err - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. 
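A hedged sketch of how Batch is intended to be called: many goroutines each submit a small, idempotent function, and bbolt coalesces them into fewer write transactions. Only DB.Batch itself comes from the code above; the bucket name, key scheme, and goroutine wiring below are illustrative.

package main

import (
	"fmt"
	"log"
	"sync"

	bolt "go.etcd.io/bbolt"
)

// writeConcurrently fans out n goroutines that each submit an idempotent
// write through Batch, letting bbolt combine them into fewer commits.
func writeConcurrently(db *bolt.DB, n int) error {
	var wg sync.WaitGroup
	errs := make(chan error, n)
	for i := 0; i < n; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			errs <- db.Batch(func(tx *bolt.Tx) error {
				b, err := tx.CreateBucketIfNotExists([]byte("events"))
				if err != nil {
					return err
				}
				// Idempotent: re-running simply rewrites the same key/value.
				return b.Put([]byte(fmt.Sprintf("event-%04d", i)), []byte("payload"))
			})
		}()
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	db, err := bolt.Open("batch.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := writeConcurrently(db, 100); err != nil {
		log.Fatal(err)
	}
}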
-func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. - metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. - panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz <= db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. 
- // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - if db.Mlock { - // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { - return fmt.Errorf("mlock/munlock error: %s", err) - } - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -func (db *DB) freepages() []pgid { - tx, err := db.beginTx() - defer func() { - err = tx.Rollback() - if err != nil { - panic("freepages: failed to rollback tx") - } - }() - if err != nil { - panic("freepages: failed to open read only tx") - } - - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) - ech := make(chan error) - go func() { - for e := range ech { - panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) - } - }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) - close(ech) - - // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. - - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { - if _, ok := reachable[i]; !ok { - fids = append(fids, i) - } - } - return fids -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Do not sync freelist to disk. This improves the database write performance - // under normal operation, but requires a full database re-sync during recovery. - NoFreelistSync bool - - // PreLoadFreelist sets whether to load the free pages when opening - // the db file. Note when opening db in write mode, bbolt will always - // load the free pages. - PreLoadFreelist bool - - // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and fragmentation in freelist is common. - // The alternative one is using hashmap, it is faster in almost all circumstances - // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. - // The default type is array - FreelistType FreelistType - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block write transaction - // if the InitialMmapSize is large enough to hold database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If initialMmapSize is smaller than the previous database size, - // it takes no effect. - InitialMmapSize int - - // PageSize overrides the default OS page size. - PageSize int - - // NoSync sets the initial value of DB.NoSync. Normally this can just be - // set directly on the DB itself when returned from Open(), but this option - // is useful in APIs which expose Options but not the underlying DB. 
- NoSync bool - - // OpenFile is used to open files. It defaults to os.OpenFile. This option - // is useful for writing hermetic tests. - OpenFile func(string, int, os.FileMode) (*os.File, error) - - // Mlock locks database file in memory when set to true. - // It prevents potential page faults, however - // used memory can't be reclaimed. (UNIX only) - Mlock bool -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, - FreelistType: FreelistArrayType, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. 
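The Options block above is normally consumed through Open. A minimal sketch follows; the one-second timeout, hashmap freelist, and 128 MiB initial map size are illustrative choices, not recommendations taken from this file.

package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Wait up to one second for the file lock, use the hashmap-backed
	// freelist, and pre-map 128 MiB so long read transactions are less
	// likely to block a growing writer (see InitialMmapSize above).
	opts := &bolt.Options{
		Timeout:         time.Second,
		FreelistType:    bolt.FreelistMapType,
		InitialMmapSize: 128 << 20,
		NoGrowSync:      false,
	}
	db, err := bolt.Open("tuned.db", 0600, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}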
-func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go deleted file mode 100644 index d1007e4b..00000000 --- a/vendor/go.etcd.io/bbolt/doc.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -package bbolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - -# Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - -# Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. -*/ -package bbolt diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go deleted file mode 100644 index f2c3b20e..00000000 --- a/vendor/go.etcd.io/bbolt/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -package bbolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). 
- ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") - - // ErrFreePagesNotLoaded is returned when a readonly transaction without - // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index 50f2d0e1..00000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,405 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. 
- freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. 
- for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. 
- var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - var ids []pgid - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) - unsafeSlice(unsafe.Pointer(&ids), data, count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l) - f.copyall(ids) - } else { - p.count = 0xFFFF - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. 
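The pending-then-release rule implemented by freelist.free and freelist.release above can be modelled without any of bbolt's internal types. The sketch below is a simplified illustration, not the real freelist: pages freed by a transaction stay pending until no open reader can still observe that transaction, and only then become reusable.

package main

import (
	"fmt"
	"sort"
)

// pendingFreelist is a toy model: free holds reusable page ids, pending maps
// the freeing transaction id to the pages still held back for readers.
type pendingFreelist struct {
	free    []uint64
	pending map[uint64][]uint64
}

func (f *pendingFreelist) freeAt(txid uint64, pages ...uint64) {
	if f.pending == nil {
		f.pending = map[uint64][]uint64{}
	}
	f.pending[txid] = append(f.pending[txid], pages...)
}

// release moves everything freed at or before txid onto the free list,
// mirroring what freelist.release does in the removed code.
func (f *pendingFreelist) release(txid uint64) {
	for tid, ids := range f.pending {
		if tid <= txid {
			f.free = append(f.free, ids...)
			delete(f.pending, tid)
		}
	}
	sort.Slice(f.free, func(i, j int) bool { return f.free[i] < f.free[j] })
}

func main() {
	var f pendingFreelist
	f.freeAt(7, 12, 13)
	f.freeAt(9, 40)
	f.release(8)        // no open reader can still see tx 7, so its pages are safe
	fmt.Println(f.free) // [12 13]; page 40 stays pending for tx 9
}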
-func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index dbd67a1e..00000000 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, size) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. 
- f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go deleted file mode 100644 index 744a972f..00000000 --- a/vendor/go.etcd.io/bbolt/mlock_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build !windows -// +build !windows - -package bbolt - -import "golang.org/x/sys/unix" - -// mlock locks memory of db file -func mlock(db *DB, fileSize int) error { - sizeToLock := fileSize - if sizeToLock > db.datasz { - // Can't lock more than mmaped slice - sizeToLock = db.datasz - } - if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil { - return err - } - return nil -} - -// munlock unlocks memory of db file -func munlock(db *DB, fileSize int) error { - if db.dataref == nil { - return nil - } - - sizeToUnlock := fileSize - if sizeToUnlock > db.datasz { - // Can't unlock more than mmaped slice - sizeToUnlock = db.datasz - } - - if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil { - return err - } - return nil -} diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go deleted file mode 
100644 index 00b0fb43..00000000 --- a/vendor/go.etcd.io/bbolt/mlock_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package bbolt - -// mlock locks memory of db file -func mlock(_ *DB, _ int) error { - panic("mlock is supported only on UNIX systems") -} - -// munlock unlocks memory of db file -func munlock(_ *DB, _ int) error { - panic("munlock is supported only on UNIX systems") -} diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go deleted file mode 100644 index 9c56150d..00000000 --- a/vendor/go.etcd.io/bbolt/node.go +++ /dev/null @@ -1,610 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) - } - return int(sz) -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() uintptr { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. 
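The ordered-key bookkeeping used by childIndex above and by put just below is the standard sort.Search insertion pattern over keys compared with bytes.Compare. A standalone sketch with plain byte-slice keys, illustrative only:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// insertSorted keeps keys ordered the way node.put does: find the insertion
// index with sort.Search, then either overwrite an exact match in place or
// shift the tail to make room.
func insertSorted(keys [][]byte, key []byte) [][]byte {
	i := sort.Search(len(keys), func(n int) bool {
		return bytes.Compare(keys[n], key) != -1 // keys[n] >= key
	})
	if i < len(keys) && bytes.Equal(keys[i], key) {
		keys[i] = key // exact match: replace
		return keys
	}
	keys = append(keys, nil)
	copy(keys[i+1:], keys[i:])
	keys[i] = key
	return keys
}

func main() {
	keys := [][]byte{[]byte("a"), []byte("c"), []byte("e")}
	keys = insertSorted(keys, []byte("d"))
	for _, k := range keys {
		fmt.Printf("%s ", k) // a c d e
	}
	fmt.Println()
}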
-func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set -// and the rest should be zeroed. -func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") - - // Initialize page. - if n.isLeaf { - p.flags = leafPageFlag - } else { - p.flags = branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. 
- if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize uintptr) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize uintptr) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.IncSplit(1) - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = uintptr(i) - inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. 
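The splitTwo threshold above is just a clamped fraction of the page size. A small worked sketch follows; the 0.1 and 1.0 clamp bounds are assumptions here (bbolt defines minFillPercent and maxFillPercent outside the lines removed in this patch), and only the pageSize * FillPercent formula comes from the code above.

package main

import "fmt"

// Assumed stand-ins for bbolt's fill-percent bounds, which live in bucket.go
// and are not part of this diff.
const (
	assumedMinFill = 0.1
	assumedMaxFill = 1.0
)

// splitThreshold mirrors the arithmetic in splitTwo: clamp the bucket's
// FillPercent, then take that fraction of a page as the size at which a
// node gets cut in two.
func splitThreshold(pageSize uintptr, fillPercent float64) int {
	if fillPercent < assumedMinFill {
		fillPercent = assumedMinFill
	} else if fillPercent > assumedMaxFill {
		fillPercent = assumedMaxFill
	}
	return int(float64(pageSize) * fillPercent)
}

func main() {
	// With a 4 KiB page and a 50% fill target, a node is split once its
	// serialized size passes about 2048 bytes.
	fmt.Println(splitThreshold(4096, 0.5)) // 2048
}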
-func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(uintptr(tx.db.pageSize)) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.IncSpill(1) - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.IncRebalance(1) - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. 
- for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.IncNodeDeref(1) -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -func compareKeys(left, right []byte) int { - return bytes.Compare(left, right) -} - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 -} - -// inode represents an internal node inside of a node. 
-// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index 379645c9..00000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,214 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. - _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - var elems []leafPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - var elems []branchPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. 
-type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go deleted file mode 100644 index 2fac8c0a..00000000 --- a/vendor/go.etcd.io/bbolt/tx.go +++ /dev/null @@ -1,797 +0,0 @@ -package bbolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "sync/atomic" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. 
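The Tx documentation above stresses that every transaction must end in Commit or Rollback so the writer can reclaim pages. A minimal sketch of the manual pattern using this package's public API (imported here as `bolt "go.etcd.io/bbolt"`; the `app.db` path and `widgets` bucket name are placeholders):

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Begin a writable transaction. The deferred Rollback guarantees the
	// transaction is closed on early returns; after a successful Commit it
	// only returns ErrTxClosed, which is ignored here.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```

The managed `db.Update` and `db.View` helpers wrap this pattern and are usually preferable; a sketch of them follows the bucket accessors below.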
-type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. 
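For callers that do not need manual control, the managed `db.Update` and `db.View` helpers hand a `*Tx` to a callback and take care of Commit and Rollback themselves. A sketch combining them with the bucket accessors and `Tx.ForEach` documented above (assumes an already opened `*bolt.DB`; bucket and key names are illustrative):

```go
package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// putAndList writes one key inside a managed read/write transaction and then
// lists the top-level buckets inside a managed read-only transaction.
func putAndList(db *bolt.DB) error {
	// db.Update commits when the callback returns nil and rolls back on error.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	}); err != nil {
		return err
	}

	// db.View runs a read-only transaction; Tx.ForEach walks the root buckets.
	return db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
			fmt.Printf("bucket: %s\n", name)
			return nil
		})
	})
}
```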
-func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - return fn(k, tx.root.Bucket(k)) - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.GetRebalance() > 0 { - tx.stats.IncRebalanceTime(time.Since(startTime)) - } - - opgid := tx.meta.pgid - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.IncSpillTime(time.Since(startTime)) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - } - - if !tx.db.NoFreelistSync { - err := tx.commitFreelist() - if err != nil { - return err - } - } else { - tx.meta.freelist = pgidNoFreelist - } - - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.IncWriteTime(time.Since(startTime)) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -func (tx *Tx) commitFreelist() error { - // Allocate new pages for the new free list. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.nonPhysicalRollback() - return nil -} - -// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk. 
-func (tx *Tx) nonPhysicalRollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - } - tx.close() -} - -// rollback needs to reload the free pages from disk in case some system error happens like fsync error. -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - // When mmap fails, the `data`, `dataref` and `datasz` may be reset to - // zero values, and there is no way to reload free page IDs in this case. - if tx.db.data != nil { - if !tx.db.hasSyncedFreelist() { - // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) - } else { - // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - } - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. -// -// Deprecated; Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { - if cerr := f.Close(); err == nil { - err = cerr - } - }() - - // Generate a meta page. We use the same page data for both meta pages. - buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, nil -} - -// CopyFile copies the entire database to file at the given path. 
-// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - _, err = tx.WriteTo(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.IncPageCount(int64(count)) - tx.stats.IncPageAlloc(int64(count * tx.db.pageSize)) - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) - var written uintptr - - // Write out page in "max allocation" sized chunks. - for { - sz := rem - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) - - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.IncWrite(1) - - // Exit inner for loop if we've written all the chunks. - rem -= sz - if rem == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - written += uintptr(sz) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) //nolint:staticcheck - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.IncWrite(1) - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - p.fastCheck(id) - return p - } - } - - // Otherwise return directly from the mmap. - p := tx.db.page(id) - p.fastCheck(id) - return p -} - -// forEachPage iterates over every page within a given page and executes a function. 
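`Tx.WriteTo` and `Tx.CopyFile` above stream a consistent snapshot from inside the transaction, so a live database can be backed up while other readers and writers keep working. A sketch of the common HTTP backup handler built on `WriteTo` (the handler name and download filename are illustrative; for databases much larger than RAM, the `Tx.WriteFlag` field documented above can be set to `syscall.O_DIRECT` before streaming):

```go
package main

import (
	"net/http"
	"strconv"

	bolt "go.etcd.io/bbolt"
)

// backupHandler streams a point-in-time copy of the database to the client.
func backupHandler(db *bolt.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		err := db.View(func(tx *bolt.Tx) error {
			w.Header().Set("Content-Type", "application/octet-stream")
			w.Header().Set("Content-Disposition", `attachment; filename="backup.db"`)
			w.Header().Set("Content-Length", strconv.FormatInt(tx.Size(), 10))
			_, err := tx.WriteTo(w)
			return err
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
```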
-func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) - stack[0] = pgidnum - tx.forEachPageInternal(stack[:1], fn) -} - -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { - p := tx.page(pgidstack[len(pgidstack)-1]) - - // Execute function. - fn(p, len(pgidstack)-1, pgidstack) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded - } - - // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. -type TxStats struct { - // Page statistics. - // - // DEPRECATED: Use GetPageCount() or IncPageCount() - PageCount int64 // number of page allocations - // DEPRECATED: Use GetPageAlloc() or IncPageAlloc() - PageAlloc int64 // total bytes allocated - - // Cursor statistics. - // - // DEPRECATED: Use GetCursorCount() or IncCursorCount() - CursorCount int64 // number of cursors created - - // Node statistics - // - // DEPRECATED: Use GetNodeCount() or IncNodeCount() - NodeCount int64 // number of node allocations - // DEPRECATED: Use GetNodeDeref() or IncNodeDeref() - NodeDeref int64 // number of node dereferences - - // Rebalance statistics. - // - // DEPRECATED: Use GetRebalance() or IncRebalance() - Rebalance int64 // number of node rebalances - // DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime() - RebalanceTime time.Duration // total time spent rebalancing - - // Split/Spill statistics. - // - // DEPRECATED: Use GetSplit() or IncSplit() - Split int64 // number of nodes split - // DEPRECATED: Use GetSpill() or IncSpill() - Spill int64 // number of nodes spilled - // DEPRECATED: Use GetSpillTime() or IncSpillTime() - SpillTime time.Duration // total time spent spilling - - // Write statistics. - // - // DEPRECATED: Use GetWrite() or IncWrite() - Write int64 // number of writes performed - // DEPRECATED: Use GetWriteTime() or IncWriteTime() - WriteTime time.Duration // total time spent writing to disk -} - -func (s *TxStats) add(other *TxStats) { - s.IncPageCount(other.GetPageCount()) - s.IncPageAlloc(other.GetPageAlloc()) - s.IncCursorCount(other.GetCursorCount()) - s.IncNodeCount(other.GetNodeCount()) - s.IncNodeDeref(other.GetNodeDeref()) - s.IncRebalance(other.GetRebalance()) - s.IncRebalanceTime(other.GetRebalanceTime()) - s.IncSplit(other.GetSplit()) - s.IncSpill(other.GetSpill()) - s.IncSpillTime(other.GetSpillTime()) - s.IncWrite(other.GetWrite()) - s.IncWriteTime(other.GetWriteTime()) -} - -// Sub calculates and returns the difference between two sets of transaction stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. 
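`Sub` below is intended for exactly that interval reporting: take a `TxStats` snapshot, wait, take another, and diff the two. A sketch of a monitoring loop doing this through `DB.Stats()` (the interval and log format are arbitrary):

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

// reportTxStats logs the transaction counters accumulated during each tick,
// using TxStats.Sub to turn the running totals into per-interval deltas.
func reportTxStats(db *bolt.DB) {
	prev := db.Stats()
	for range time.Tick(10 * time.Second) {
		cur := db.Stats()
		diff := cur.TxStats.Sub(&prev.TxStats)
		log.Printf("writes=%d writeTime=%s spills=%d rebalances=%d",
			diff.GetWrite(), diff.GetWriteTime(), diff.GetSpill(), diff.GetRebalance())
		prev = cur
	}
}
```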
-func (s *TxStats) Sub(other *TxStats) TxStats { - var diff TxStats - diff.PageCount = s.GetPageCount() - other.GetPageCount() - diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc() - diff.CursorCount = s.GetCursorCount() - other.GetCursorCount() - diff.NodeCount = s.GetNodeCount() - other.GetNodeCount() - diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref() - diff.Rebalance = s.GetRebalance() - other.GetRebalance() - diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime() - diff.Split = s.GetSplit() - other.GetSplit() - diff.Spill = s.GetSpill() - other.GetSpill() - diff.SpillTime = s.GetSpillTime() - other.GetSpillTime() - diff.Write = s.GetWrite() - other.GetWrite() - diff.WriteTime = s.GetWriteTime() - other.GetWriteTime() - return diff -} - -// GetPageCount returns PageCount atomically. -func (s *TxStats) GetPageCount() int64 { - return atomic.LoadInt64(&s.PageCount) -} - -// IncPageCount increases PageCount atomically and returns the new value. -func (s *TxStats) IncPageCount(delta int64) int64 { - return atomic.AddInt64(&s.PageCount, delta) -} - -// GetPageAlloc returns PageAlloc atomically. -func (s *TxStats) GetPageAlloc() int64 { - return atomic.LoadInt64(&s.PageAlloc) -} - -// IncPageAlloc increases PageAlloc atomically and returns the new value. -func (s *TxStats) IncPageAlloc(delta int64) int64 { - return atomic.AddInt64(&s.PageAlloc, delta) -} - -// GetCursorCount returns CursorCount atomically. -func (s *TxStats) GetCursorCount() int64 { - return atomic.LoadInt64(&s.CursorCount) -} - -// IncCursorCount increases CursorCount atomically and return the new value. -func (s *TxStats) IncCursorCount(delta int64) int64 { - return atomic.AddInt64(&s.CursorCount, delta) -} - -// GetNodeCount returns NodeCount atomically. -func (s *TxStats) GetNodeCount() int64 { - return atomic.LoadInt64(&s.NodeCount) -} - -// IncNodeCount increases NodeCount atomically and returns the new value. -func (s *TxStats) IncNodeCount(delta int64) int64 { - return atomic.AddInt64(&s.NodeCount, delta) -} - -// GetNodeDeref returns NodeDeref atomically. -func (s *TxStats) GetNodeDeref() int64 { - return atomic.LoadInt64(&s.NodeDeref) -} - -// IncNodeDeref increases NodeDeref atomically and returns the new value. -func (s *TxStats) IncNodeDeref(delta int64) int64 { - return atomic.AddInt64(&s.NodeDeref, delta) -} - -// GetRebalance returns Rebalance atomically. -func (s *TxStats) GetRebalance() int64 { - return atomic.LoadInt64(&s.Rebalance) -} - -// IncRebalance increases Rebalance atomically and returns the new value. -func (s *TxStats) IncRebalance(delta int64) int64 { - return atomic.AddInt64(&s.Rebalance, delta) -} - -// GetRebalanceTime returns RebalanceTime atomically. -func (s *TxStats) GetRebalanceTime() time.Duration { - return atomicLoadDuration(&s.RebalanceTime) -} - -// IncRebalanceTime increases RebalanceTime atomically and returns the new value. -func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration { - return atomicAddDuration(&s.RebalanceTime, delta) -} - -// GetSplit returns Split atomically. -func (s *TxStats) GetSplit() int64 { - return atomic.LoadInt64(&s.Split) -} - -// IncSplit increases Split atomically and returns the new value. -func (s *TxStats) IncSplit(delta int64) int64 { - return atomic.AddInt64(&s.Split, delta) -} - -// GetSpill returns Spill atomically. -func (s *TxStats) GetSpill() int64 { - return atomic.LoadInt64(&s.Spill) -} - -// IncSpill increases Spill atomically and returns the new value. 
-func (s *TxStats) IncSpill(delta int64) int64 { - return atomic.AddInt64(&s.Spill, delta) -} - -// GetSpillTime returns SpillTime atomically. -func (s *TxStats) GetSpillTime() time.Duration { - return atomicLoadDuration(&s.SpillTime) -} - -// IncSpillTime increases SpillTime atomically and returns the new value. -func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration { - return atomicAddDuration(&s.SpillTime, delta) -} - -// GetWrite returns Write atomically. -func (s *TxStats) GetWrite() int64 { - return atomic.LoadInt64(&s.Write) -} - -// IncWrite increases Write atomically and returns the new value. -func (s *TxStats) IncWrite(delta int64) int64 { - return atomic.AddInt64(&s.Write, delta) -} - -// GetWriteTime returns WriteTime atomically. -func (s *TxStats) GetWriteTime() time.Duration { - return atomicLoadDuration(&s.WriteTime) -} - -// IncWriteTime increases WriteTime atomically and returns the new value. -func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration { - return atomicAddDuration(&s.WriteTime, delta) -} - -func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration { - return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du))) -} - -func atomicLoadDuration(ptr *time.Duration) time.Duration { - return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr)))) -} diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go deleted file mode 100644 index 75c7c084..00000000 --- a/vendor/go.etcd.io/bbolt/tx_check.go +++ /dev/null @@ -1,226 +0,0 @@ -package bbolt - -import ( - "encoding/hex" - "fmt" -) - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, -// so that bolt can generate human-readable diagnostic messages. -func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { - chkConfig := checkConfig{ - kvStringer: HexKVStringer(), - } - for _, op := range options { - op(&chkConfig) - } - - ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) - return ch -} - -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { - // Force loading free list if opened in ReadOnly mode. - tx.db.loadFreelist() - - // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) - - // Ensure all pages below high water mark are either reachable or freed. 
- for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, - kvStringer KVStringer, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) - } - }) - - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) - - // Check each bucket within this bucket. - _ = b.ForEachBucket(func(k []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, kvStringer, ch) - } - return nil - }) -} - -// recursivelyCheckPages confirms database consistency with respect to b-tree -// key order constraints: -// - keys on pages must be sorted -// - keys on children pages are between 2 consecutive keys on the parent's branch page). -func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { - tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) -} - -// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: -// - >=`minKeyClosed` (can be nil) -// - <`maxKeyOpen` (can be nil) -// - Are in right ordering relationship to their parents. -// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. -func (tx *Tx) recursivelyCheckPagesInternal( - pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, - keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { - - p := tx.page(pgId) - pagesStack = append(pagesStack, pgId) - switch { - case p.flags&branchPageFlag != 0: - // For branch page we navigate ranges of all subpages. 
- runningMin := minKeyClosed - for i := range p.branchPageElements() { - elem := p.branchPageElement(uint16(i)) - verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - - maxKey := maxKeyOpen - if i < len(p.branchPageElements())-1 { - maxKey = p.branchPageElement(uint16(i + 1)).key() - } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) - runningMin = maxKeyInSubtree - } - return maxKeyInSubtree - case p.flags&leafPageFlag != 0: - runningMin := minKeyClosed - for i := range p.leafPageElements() { - elem := p.leafPageElement(uint16(i)) - verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - runningMin = elem.key() - } - if p.count > 0 { - return p.leafPageElement(p.count - 1).key() - } - default: - ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) - } - return maxKeyInSubtree -} - -/*** - * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", - * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). - */ -func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { - if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { - ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", - index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) - } - if index > 0 { - cmpRet := compareKeys(previousKey, key) - if cmpRet > 0 { - ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v", - index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) - } - if cmpRet == 0 { - ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v", - index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) - } - } - if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 { - ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v", - index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) - } -} - -// =========================================================================================== - -type checkConfig struct { - kvStringer KVStringer -} - -type CheckOption func(options *checkConfig) - -func WithKVStringer(kvStringer KVStringer) CheckOption { - return func(c *checkConfig) { - c.kvStringer = kvStringer - } -} - -// KVStringer allows to prepare human-readable diagnostic messages. -type KVStringer interface { - KeyToString([]byte) string - ValueToString([]byte) string -} - -// HexKVStringer serializes both key & value to hex representation. 
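`Check` and `CheckWithOptions` above report problems on a channel and close it when the scan finishes, so callers simply drain the channel. A sketch running the check inside a read-only transaction on an otherwise idle database (the default `HexKVStringer` is passed explicitly only to show where a custom `KVStringer` would go):

```go
package main

import (
	"fmt"
	"strings"

	bolt "go.etcd.io/bbolt"
)

// verify runs the consistency check and returns all reported problems as one error.
func verify(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		var msgs []string
		for err := range tx.CheckWithOptions(bolt.WithKVStringer(bolt.HexKVStringer())) {
			msgs = append(msgs, err.Error())
		}
		if len(msgs) > 0 {
			return fmt.Errorf("consistency check failed:\n%s", strings.Join(msgs, "\n"))
		}
		return nil
	})
}
```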
-func HexKVStringer() KVStringer { - return hexKvStringer{} -} - -type hexKvStringer struct{} - -func (_ hexKvStringer) KeyToString(key []byte) string { - return hex.EncodeToString(key) -} - -func (_ hexKvStringer) ValueToString(value []byte) string { - return hex.EncodeToString(value) -} diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go deleted file mode 100644 index c0e50375..00000000 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ /dev/null @@ -1,39 +0,0 @@ -package bbolt - -import ( - "reflect" - "unsafe" -) - -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { - return unsafe.Pointer(uintptr(base) + offset) -} - -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { - return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) -} - -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { - // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices - // - // This memory is not allocated from C, but it is unmanaged by Go's - // garbage collector and should behave similarly, and the compiler - // should produce similar code. Note that this conversion allows a - // subslice to begin after the base address, with an optional offset, - // while the URL above does not cover this case and only slices from - // index 0. However, the wiki never says that the address must be to - // the beginning of a C allocation (or even that malloc was used at - // all), so this is believed to be correct. - return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] -} - -// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by -// the slice parameter. This helper should be used over other direct -// manipulation of reflect.SliceHeader to prevent misuse, namely, converting -// from reflect.SliceHeader to a Go slice type. -func unsafeSlice(slice, data unsafe.Pointer, len int) { - s := (*reflect.SliceHeader)(slice) - s.Data = uintptr(data) - s.Cap = len - s.Len = len -} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore deleted file mode 100644 index 74a6db47..00000000 --- a/vendor/go.opencensus.io/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -/.idea/ - -# go.opencensus.io/exporter/aws -/exporter/aws/ - -# Exclude vendor, use dep ensure after checkout: -/vendor/github.com/ -/vendor/golang.org/ -/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS deleted file mode 100644 index e491a9e7..00000000 --- a/vendor/go.opencensus.io/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md deleted file mode 100644 index 1ba3962c..00000000 --- a/vendor/go.opencensus.io/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. 
- -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. - -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ - -## Instructions - -Fork the repo, checkout the upstream repo to your GOPATH by: - -``` -$ go get -d go.opencensus.io -``` - -Add your fork as an origin: - -``` -cd $(go env GOPATH)/src/go.opencensus.io -git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git -``` - -Run tests: - -``` -$ make install-tools # Only first time. -$ make -``` - -Checkout a new branch, make modifications and push the branch to your fork: - -``` -$ git checkout -b feature -# edit files -$ git commit -$ git push fork feature -``` - -Open a pull request against the main opencensus-go repo. - -## General Notes -This project uses Appveyor and Travis for CI. - -The dependencies are managed with `go mod` if you work with the sources under your -`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile deleted file mode 100644 index d896edc9..00000000 --- a/vendor/go.opencensus.io/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -# TODO: Fix this on windows. -ALL_SRC := $(shell find . -name '*.go' \ - -not -path './vendor/*' \ - -not -path '*/gen-go/*' \ - -type f | sort) -ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) - -GOTEST_OPT?=-v -race -timeout 30s -GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic -GOTEST=go test -GOIMPORTS=goimports -GOLINT=golint -GOVET=go vet -EMBEDMD=embedmd -# TODO decide if we need to change these names. -TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" -TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" -README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') - -.DEFAULT_GOAL := imports-lint-vet-embedmd-test - -.PHONY: imports-lint-vet-embedmd-test -imports-lint-vet-embedmd-test: imports lint vet embedmd test - -# TODO enable test-with-coverage in tavis -.PHONY: travis-ci -travis-ci: imports lint vet embedmd test test-386 - -all-pkgs: - @echo $(ALL_PKGS) | tr ' ' '\n' | sort - -all-srcs: - @echo $(ALL_SRC) | tr ' ' '\n' | sort - -.PHONY: test -test: - $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) - -.PHONY: test-386 -test-386: - GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) - -.PHONY: test-with-coverage -test-with-coverage: - $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) - -.PHONY: imports -imports: - @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ - if [ "$$IMPORTSOUT" ]; then \ - echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ - echo "$$IMPORTSOUT\n"; \ - exit 1; \ - else \ - echo "Imports finished successfully"; \ - fi - -.PHONY: lint -lint: - @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ - if [ "$$LINTOUT" ]; then \ - echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ - echo "$$LINTOUT\n"; \ - exit 1; \ - else \ - echo "Lint finished successfully"; \ - fi - -.PHONY: vet -vet: - # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" - @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ - if [ "$$VETOUT" ]; then \ - echo "$(GOVET) FAILED => go vet the following files:\n"; \ - echo "$$VETOUT\n"; \ - exit 1; \ - else \ - echo "Vet finished successfully"; \ - fi - -.PHONY: embedmd -embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ - if [ "$$EMBEDMDOUT" ]; then \ - echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ - echo "$$EMBEDMDOUT\n"; \ - exit 1; \ - else \ - echo "Embedmd finished successfully"; \ - fi - -.PHONY: install-tools -install-tools: - go install golang.org/x/lint/golint@latest - go install golang.org/x/tools/cmd/cover@latest - go install golang.org/x/tools/cmd/goimports@latest - go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md deleted file mode 100644 index 1d7e8371..00000000 --- a/vendor/go.opencensus.io/README.md +++ /dev/null @@ -1,267 +0,0 @@ -# OpenCensus Libraries for Go - -[![Build Status][travis-image]][travis-url] -[![Windows Build Status][appveyor-image]][appveyor-url] -[![GoDoc][godoc-image]][godoc-url] -[![Gitter chat][gitter-image]][gitter-url] - -OpenCensus Go is a Go implementation of OpenCensus, a toolkit for -collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats and tracing. - -#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). - -## Installation - -``` -$ go get -u go.opencensus.io -``` - -The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). -The use of vendoring or a dependency management tool is recommended. - -## Prerequisites - -OpenCensus Go libraries require Go 1.8 or later. - -## Getting Started - -The easiest way to get started using OpenCensus in your application is to use an existing -integration with your RPC framework: - -* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) -* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) -* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) -* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) -* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) -* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) -* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) -* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) -* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) -* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) - -If you're using a framework not listed here, you could either implement your own middleware for your -framework or use [custom stats](#stats) and [spans](#spans) directly in your application. - -## Exporters - -OpenCensus can export instrumentation data to various backends. 
-OpenCensus has exporter implementations for the following, users -can implement their own exporters by implementing the exporter interfaces -([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), -[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): - -* [Prometheus][exporter-prom] for stats -* [OpenZipkin][exporter-zipkin] for traces -* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces -* [Jaeger][exporter-jaeger] for traces -* [AWS X-Ray][exporter-xray] for traces -* [Datadog][exporter-datadog] for stats and traces -* [Graphite][exporter-graphite] for stats -* [Honeycomb][exporter-honeycomb] for traces -* [New Relic][exporter-newrelic] for stats and traces - -## Overview - -![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) - -In a microservices environment, a user request may go through -multiple services until there is a response. OpenCensus allows -you to instrument your services and collect diagnostics data all -through your services end-to-end. - -## Tags - -Tags represent propagated key-value pairs. They are propagated using `context.Context` -in the same process or can be encoded to be transmitted on the wire. Usually, this will -be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` -for gRPC. - -Package `tag` allows adding or modifying tags in the current context. - -[embedmd]:# (internal/readme/tags.go new) -```go -ctx, err := tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Upsert(userIDKey, "cde36753ed"), -) -if err != nil { - log.Fatal(err) -} -``` - -## Stats - -OpenCensus is a low-overhead framework even if instrumentation is always enabled. -In order to be so, it is optimized to make recording of data points fast -and separate from the data aggregation. - -OpenCensus stats collection happens in two stages: - -* Definition of measures and recording of data points -* Definition of views and aggregation of the recorded data - -### Recording - -Measurements are data points associated with a measure. -Recording implicitly tags the set of Measurements with the tags from the -provided context: - -[embedmd]:# (internal/readme/stats.go record) -```go -stats.Record(ctx, videoSize.M(102478)) -``` - -### Views - -Views are how Measures are aggregated. You can think of them as queries over the -set of recorded data points (measurements). - -Views have two parts: the tags to group by and the aggregation type used. - -Currently three types of aggregations are supported: -* CountAggregation is used to count the number of times a sample was recorded. -* DistributionAggregation is used to provide a histogram of the values of the samples. -* SumAggregation is used to sum up all sample values. - -[embedmd]:# (internal/readme/stats.go aggs) -```go -distAgg := view.Distribution(1<<32, 2<<32, 3<<32) -countAgg := view.Count() -sumAgg := view.Sum() -``` - -Here we create a view with the DistributionAggregation over our measure. - -[embedmd]:# (internal/readme/stats.go view) -```go -if err := view.Register(&view.View{ - Name: "example.com/video_size_distribution", - Description: "distribution of processed video size over time", - Measure: videoSize, - Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), -}); err != nil { - log.Fatalf("Failed to register view: %v", err) -} -``` - -Register begins collecting data for the view. Registered views' data will be -exported via the registered exporters. 
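The recording and view snippets in this section all assume a `videoSize` measure defined elsewhere. A minimal end-to-end sketch under that assumption (using the same illustrative measure and view names as above) ties the pieces together; note that the aggregated data stays in-process until one of the exporters listed earlier is registered.

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// videoSize is the measure the snippets above record against.
var videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes)

func main() {
	// Register a view so recorded measurements are aggregated.
	if err := view.Register(&view.View{
		Name:        "example.com/video_size_distribution",
		Description: "distribution of processed video size over time",
		Measure:     videoSize,
		Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
	}); err != nil {
		log.Fatalf("Failed to register view: %v", err)
	}

	// Each Record call contributes one data point to every registered view
	// that aggregates this measure.
	stats.Record(context.Background(), videoSize.M(102478))
}
```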
- -## Traces - -A distributed trace tracks the progression of a single user request as -it is handled by the services and processes that make up an application. -Each step is called a span in the trace. Spans include metadata about the step, -including especially the time spent in the step, called the span’s latency. - -Below you see a trace and several spans underneath it. - -![Traces and spans](https://i.imgur.com/7hZwRVj.png) - -### Spans - -Span is the unit step in a trace. Each span has a name, latency, status and -additional metadata. - -Below we are starting a span for a cache read and ending it -when we are done: - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -### Propagation - -Spans can have parents or can be root spans if they don't have any parents. -The current span is propagated in-process and across the network to allow associating -new child spans with the parent. - -In the same process, `context.Context` is used to propagate spans. -`trace.StartSpan` creates a new span as a root if the current context -doesn't contain a span. Or, it creates a child of the span that is -already in current context. The returned context can be used to keep -propagating the newly created span in the current context. - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -Across the network, OpenCensus provides different propagation -methods for different protocols. - -* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). -* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) - by default but can be configured to use a custom propagation method by setting another - [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). - -## Execution Tracer - -With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. -See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) -for an example of their mutual use. - -## Profiles - -OpenCensus tags can be applied as profiler labels -for users who are on Go 1.9 and above. - -[embedmd]:# (internal/readme/tags.go profiler) -```go -ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Insert(userIDKey, "fff0989878"), -) -if err != nil { - log.Fatal(err) -} -tag.Do(ctx, func(ctx context.Context) { - // Do work. - // When profiling is on, samples will be - // recorded with the key/values from the tag map. -}) -``` - -A screenshot of the CPU profile from the program above: - -![CPU profile](https://i.imgur.com/jBKjlkw.png) - -## Deprecation Policy - -Before version 1.0.0, the following deprecation policy will be observed: - -No backwards-incompatible changes will be made except for the removal of symbols that have -been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release -removing the *Deprecated* functionality will be made no sooner than 28 days after the first -release in which the functionality was marked *Deprecated*. 
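The Exporters list earlier names the available backends but does not show how one is wired in on the tracing side. A minimal sketch of a custom exporter together with a sampler override (the `consoleExporter` type is invented for illustration; real deployments would register one of the exporters listed above instead):

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/trace"
)

// consoleExporter is a stand-in backend: anything implementing
// trace.Exporter's ExportSpan method can receive completed spans.
type consoleExporter struct{}

func (consoleExporter) ExportSpan(sd *trace.SpanData) {
	log.Printf("span %q trace=%s span=%s latency=%s",
		sd.Name, sd.TraceID, sd.SpanID, sd.EndTime.Sub(sd.StartTime))
}

func main() {
	trace.RegisterExporter(consoleExporter{})
	// Sample every span; by default only a small fraction of traces is sampled.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})

	ctx, span := trace.StartSpan(context.Background(), "main")
	defer span.End()

	_, child := trace.StartSpan(ctx, "cache.Get")
	child.End()
}
```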
- -[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master -[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go -[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true -[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master -[godoc-image]: https://godoc.org/go.opencensus.io?status.svg -[godoc-url]: https://godoc.org/go.opencensus.io -[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg -[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge - - -[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap -[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace - -[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus -[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver -[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger -[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws -[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog -[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite -[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter -[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml deleted file mode 100644 index d08f0eda..00000000 --- a/vendor/go.opencensus.io/appveyor.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "{build}" - -platform: x64 - -clone_folder: c:\gopath\src\go.opencensus.io - -environment: - GOPATH: 'c:\gopath' - GO111MODULE: 'on' - CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 - -stack: go 1.11 - -before_test: - - go version - - go env - -build: false -deploy: false - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go build -v .\... - - go test -v .\... # No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go deleted file mode 100644 index 81dc7183..00000000 --- a/vendor/go.opencensus.io/internal/internal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opencensus.io/internal" - -import ( - "fmt" - "time" - - opencensus "go.opencensus.io" -) - -// UserAgent is the user agent to be added to the outgoing -// requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. 
-// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go deleted file mode 100644 index de8ccf23..00000000 --- a/vendor/go.opencensus.io/internal/sanitize.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "strings" - "unicode" -) - -const labelKeySizeLimit = 100 - -// Sanitize returns a string that is trunacated to 100 characters if it's too -// long, and replaces non-alphanumeric characters to underscores. -func Sanitize(s string) string { - if len(s) == 0 { - return s - } - if len(s) > labelKeySizeLimit { - s = s[:labelKeySizeLimit] - } - s = strings.Map(sanitizeRune, s) - if unicode.IsDigit(rune(s[0])) { - s = "key_" + s - } - if s[0] == '_' { - s = "key" + s - } - return s -} - -// converts anything that is not a letter or digit to an underscore -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - // Everything else turns into an underscore - return '_' -} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go deleted file mode 100644 index 073af7b4..00000000 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "time" -) - -// Trace allows internal access to some trace functionality. -// TODO(#412): remove this -var Trace interface{} - -// LocalSpanStoreEnabled true if the local span store is enabled. -var LocalSpanStoreEnabled bool - -// BucketConfiguration stores the number of samples to store for span buckets -// for successful and failed spans for a particular span name. -type BucketConfiguration struct { - Name string - MaxRequestsSucceeded int - MaxRequestsErrors int -} - -// PerMethodSummary is a summary of the spans stored for a single span name. -type PerMethodSummary struct { - Active int - LatencyBuckets []LatencyBucketSummary - ErrorBuckets []ErrorBucketSummary -} - -// LatencyBucketSummary is a summary of a latency bucket. 
-type LatencyBucketSummary struct { - MinLatency, MaxLatency time.Duration - Size int -} - -// ErrorBucketSummary is a summary of an error bucket. -type ErrorBucketSummary struct { - ErrorCode int32 - Size int -} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go deleted file mode 100644 index 11e31f42..00000000 --- a/vendor/go.opencensus.io/opencensus.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package opencensus contains Go support for OpenCensus. -package opencensus // import "go.opencensus.io" - -// Version is the current release version of OpenCensus in use. -func Version() string { - return "0.24.0" -} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go deleted file mode 100644 index c8e26ed6..00000000 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "fmt" - "time" -) - -type ( - // TraceID is a 16-byte identifier for a set of spans. - TraceID [16]byte - - // SpanID is an 8-byte identifier for a single span. - SpanID [8]byte -) - -func (t TraceID) String() string { - return fmt.Sprintf("%02x", t[:]) -} - -func (s SpanID) String() string { - return fmt.Sprintf("%02x", s[:]) -} - -// Annotation represents a text annotation with a set of attributes and a timestamp. -type Annotation struct { - Time time.Time - Message string - Attributes map[string]interface{} -} - -// Attribute represents a key-value pair on a span, link or annotation. -// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. -type Attribute struct { - key string - value interface{} -} - -// Key returns the attribute's key -func (a *Attribute) Key() string { - return a.key -} - -// Value returns the attribute's value -func (a *Attribute) Value() interface{} { - return a.value -} - -// BoolAttribute returns a bool-valued attribute. -func BoolAttribute(key string, value bool) Attribute { - return Attribute{key: key, value: value} -} - -// Int64Attribute returns an int64-valued attribute. -func Int64Attribute(key string, value int64) Attribute { - return Attribute{key: key, value: value} -} - -// Float64Attribute returns a float64-valued attribute. 
-func Float64Attribute(key string, value float64) Attribute { - return Attribute{key: key, value: value} -} - -// StringAttribute returns a string-valued attribute. -func StringAttribute(key string, value string) Attribute { - return Attribute{key: key, value: value} -} - -// LinkType specifies the relationship between the span that had the link -// added, and the linked span. -type LinkType int32 - -// LinkType values. -const ( - LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The linked span is a child of the current span. - LinkTypeParent // The linked span is the parent of the current span. -) - -// Link represents a reference from one span to another span. -type Link struct { - TraceID TraceID - SpanID SpanID - Type LinkType - // Attributes is a set of attributes on the link. - Attributes map[string]interface{} -} - -// MessageEventType specifies the type of message event. -type MessageEventType int32 - -// MessageEventType values. -const ( - MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. - MessageEventTypeSent // Indicates a sent RPC message. - MessageEventTypeRecv // Indicates a received RPC message. -) - -// MessageEvent represents an event describing a message sent or received on the network. -type MessageEvent struct { - Time time.Time - EventType MessageEventType - MessageID int64 - UncompressedByteSize int64 - CompressedByteSize int64 -} - -// Status is the status of a Span. -type Status struct { - // Code is a status code. Zero indicates success. - // - // If Code will be propagated to Google APIs, it ideally should be a value from - // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . - Code int32 - Message string -} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go deleted file mode 100644 index 775f8274..00000000 --- a/vendor/go.opencensus.io/trace/config.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - - "go.opencensus.io/trace/internal" -) - -// Config represents the global tracing configuration. -type Config struct { - // DefaultSampler is the default sampler used when creating new spans. - DefaultSampler Sampler - - // IDGenerator is for internal use only. 
- IDGenerator internal.IDGenerator - - // MaxAnnotationEventsPerSpan is max number of annotation events per span - MaxAnnotationEventsPerSpan int - - // MaxMessageEventsPerSpan is max number of message events per span - MaxMessageEventsPerSpan int - - // MaxAnnotationEventsPerSpan is max number of attributes per span - MaxAttributesPerSpan int - - // MaxLinksPerSpan is max number of links per span - MaxLinksPerSpan int -} - -var configWriteMu sync.Mutex - -const ( - // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span - DefaultMaxAnnotationEventsPerSpan = 32 - - // DefaultMaxMessageEventsPerSpan is default max number of message events per span - DefaultMaxMessageEventsPerSpan = 128 - - // DefaultMaxAttributesPerSpan is default max number of attributes per span - DefaultMaxAttributesPerSpan = 32 - - // DefaultMaxLinksPerSpan is default max number of links per span - DefaultMaxLinksPerSpan = 32 -) - -// ApplyConfig applies changes to the global tracing configuration. -// -// Fields not provided in the given config are going to be preserved. -func ApplyConfig(cfg Config) { - configWriteMu.Lock() - defer configWriteMu.Unlock() - c := *config.Load().(*Config) - if cfg.DefaultSampler != nil { - c.DefaultSampler = cfg.DefaultSampler - } - if cfg.IDGenerator != nil { - c.IDGenerator = cfg.IDGenerator - } - if cfg.MaxAnnotationEventsPerSpan > 0 { - c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan - } - if cfg.MaxMessageEventsPerSpan > 0 { - c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan - } - if cfg.MaxAttributesPerSpan > 0 { - c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan - } - if cfg.MaxLinksPerSpan > 0 { - c.MaxLinksPerSpan = cfg.MaxLinksPerSpan - } - config.Store(&c) -} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go deleted file mode 100644 index 7a1616a5..00000000 --- a/vendor/go.opencensus.io/trace/doc.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace contains support for OpenCensus distributed tracing. - -The following assumes a basic familiarity with OpenCensus concepts. -See http://opencensus.io - -# Exporting Traces - -To export collected tracing data, register at least one exporter. You can use -one of the provided exporters or write your own. - - trace.RegisterExporter(exporter) - -By default, traces will be sampled relatively rarely. To change the sampling -frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler -to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - -Be careful about using trace.AlwaysSample in a production application with -significant traffic: a new trace will be started and exported for every request. - -# Adding Spans to a Trace - -A trace consists of a tree of spans. In Go, the current span is carried in a -context.Context. 
- -It is common to want to capture all the activity of a function call in a span. For -this to work, the function must take a context.Context as a parameter. Add these two -lines to the top of the function: - - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() - -StartSpan will create a new top-level span if the context -doesn't contain another span, otherwise it will create a child span. -*/ -package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go deleted file mode 100644 index ffc264f2..00000000 --- a/vendor/go.opencensus.io/trace/evictedqueue.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -type evictedQueue struct { - queue []interface{} - capacity int - droppedCount int -} - -func newEvictedQueue(capacity int) *evictedQueue { - eq := &evictedQueue{ - capacity: capacity, - queue: make([]interface{}, 0), - } - - return eq -} - -func (eq *evictedQueue) add(value interface{}) { - if len(eq.queue) == eq.capacity { - eq.queue = eq.queue[1:] - eq.droppedCount++ - } - eq.queue = append(eq.queue, value) -} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go deleted file mode 100644 index e0d9a4b9..00000000 --- a/vendor/go.opencensus.io/trace/export.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "sync/atomic" - "time" -) - -// Exporter is a type for functions that receive sampled trace spans. -// -// The ExportSpan method should be safe for concurrent use and should return -// quickly; if an Exporter takes a significant amount of time to process a -// SpanData, that work should be done on another goroutine. -// -// The SpanData should not be modified, but a pointer to it can be kept. -type Exporter interface { - ExportSpan(s *SpanData) -} - -type exportersMap map[Exporter]struct{} - -var ( - exporterMu sync.Mutex - exporters atomic.Value -) - -// RegisterExporter adds to the list of Exporters that will receive sampled -// trace spans. -// -// Binaries can register exporters, libraries shouldn't register exporters. 
-func RegisterExporter(e Exporter) { - exporterMu.Lock() - new := make(exportersMap) - if old, ok := exporters.Load().(exportersMap); ok { - for k, v := range old { - new[k] = v - } - } - new[e] = struct{}{} - exporters.Store(new) - exporterMu.Unlock() -} - -// UnregisterExporter removes from the list of Exporters the Exporter that was -// registered with the given name. -func UnregisterExporter(e Exporter) { - exporterMu.Lock() - new := make(exportersMap) - if old, ok := exporters.Load().(exportersMap); ok { - for k, v := range old { - new[k] = v - } - } - delete(new, e) - exporters.Store(new) - exporterMu.Unlock() -} - -// SpanData contains all the information collected by a Span. -type SpanData struct { - SpanContext - ParentSpanID SpanID - SpanKind int - Name string - StartTime time.Time - // The wall clock time of EndTime will be adjusted to always be offset - // from StartTime by the duration of the span. - EndTime time.Time - // The values of Attributes each have type string, bool, or int64. - Attributes map[string]interface{} - Annotations []Annotation - MessageEvents []MessageEvent - Status - Links []Link - HasRemoteParent bool - DroppedAttributeCount int - DroppedAnnotationCount int - DroppedMessageEventCount int - DroppedLinkCount int - - // ChildSpanCount holds the number of child span created for this span. - ChildSpanCount int -} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go deleted file mode 100644 index 7e808d8f..00000000 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides trace internals. -package internal - -// IDGenerator allows custom generators for TraceId and SpanId. -type IDGenerator interface { - NewTraceID() [16]byte - NewSpanID() [8]byte -} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go deleted file mode 100644 index 80095a5f..00000000 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "github.com/golang/groupcache/lru" -) - -// A simple lru.Cache wrapper that tracks the keys of the current contents and -// the cumulative number of evicted items. 
-type lruMap struct { - cacheKeys map[lru.Key]bool - cache *lru.Cache - droppedCount int -} - -func newLruMap(size int) *lruMap { - lm := &lruMap{ - cacheKeys: make(map[lru.Key]bool), - cache: lru.New(size), - droppedCount: 0, - } - lm.cache.OnEvicted = func(key lru.Key, value interface{}) { - delete(lm.cacheKeys, key) - lm.droppedCount++ - } - return lm -} - -func (lm lruMap) len() int { - return lm.cache.Len() -} - -func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, 0, len(lm.cacheKeys)) - for k := range lm.cacheKeys { - keys = append(keys, k) - } - return keys -} - -func (lm *lruMap) add(key, value interface{}) { - lm.cacheKeys[lru.Key(key)] = true - lm.cache.Add(lru.Key(key), value) -} - -func (lm *lruMap) get(key interface{}) (interface{}, bool) { - return lm.cache.Get(key) -} diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go deleted file mode 100644 index 71c10f9e..00000000 --- a/vendor/go.opencensus.io/trace/sampling.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "encoding/binary" -) - -const defaultSamplingProbability = 1e-4 - -// Sampler decides whether a trace should be sampled and exported. -type Sampler func(SamplingParameters) SamplingDecision - -// SamplingParameters contains the values passed to a Sampler. -type SamplingParameters struct { - ParentContext SpanContext - TraceID TraceID - SpanID SpanID - Name string - HasRemoteParent bool -} - -// SamplingDecision is the value returned by a Sampler. -type SamplingDecision struct { - Sample bool -} - -// ProbabilitySampler returns a Sampler that samples a given fraction of traces. -// -// It also samples spans whose parents are sampled. -func ProbabilitySampler(fraction float64) Sampler { - if !(fraction >= 0) { - fraction = 0 - } else if fraction >= 1 { - return AlwaysSample() - } - - traceIDUpperBound := uint64(fraction * (1 << 63)) - return Sampler(func(p SamplingParameters) SamplingDecision { - if p.ParentContext.IsSampled() { - return SamplingDecision{Sample: true} - } - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 - return SamplingDecision{Sample: x < traceIDUpperBound} - }) -} - -// AlwaysSample returns a Sampler that samples every trace. -// Be careful about using this sampler in a production application with -// significant traffic: a new trace will be started and exported for every -// request. -func AlwaysSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: true} - } -} - -// NeverSample returns a Sampler that samples no traces. 
-func NeverSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: false} - } -} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go deleted file mode 100644 index fbabad34..00000000 --- a/vendor/go.opencensus.io/trace/spanbucket.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "time" -) - -// samplePeriod is the minimum time between accepting spans in a single bucket. -const samplePeriod = time.Second - -// defaultLatencies contains the default latency bucket bounds. -// TODO: consider defaults, make configurable -var defaultLatencies = [...]time.Duration{ - 10 * time.Microsecond, - 100 * time.Microsecond, - time.Millisecond, - 10 * time.Millisecond, - 100 * time.Millisecond, - time.Second, - 10 * time.Second, - time.Minute, -} - -// bucket is a container for a set of spans for a particular error code or latency range. -type bucket struct { - nextTime time.Time // next time we can accept a span - buffer []*SpanData // circular buffer of spans - nextIndex int // location next SpanData should be placed in buffer - overflow bool // whether the circular buffer has wrapped around -} - -func makeBucket(bufferSize int) bucket { - return bucket{ - buffer: make([]*SpanData, bufferSize), - } -} - -// add adds a span to the bucket, if nextTime has been reached. -func (b *bucket) add(s *SpanData) { - if s.EndTime.Before(b.nextTime) { - return - } - if len(b.buffer) == 0 { - return - } - b.nextTime = s.EndTime.Add(samplePeriod) - b.buffer[b.nextIndex] = s - b.nextIndex++ - if b.nextIndex == len(b.buffer) { - b.nextIndex = 0 - b.overflow = true - } -} - -// size returns the number of spans in the bucket. -func (b *bucket) size() int { - if b.overflow { - return len(b.buffer) - } - return b.nextIndex -} - -// span returns the ith span in the bucket. -func (b *bucket) span(i int) *SpanData { - if !b.overflow { - return b.buffer[i] - } - if i < len(b.buffer)-b.nextIndex { - return b.buffer[b.nextIndex+i] - } - return b.buffer[b.nextIndex+i-len(b.buffer)] -} - -// resize changes the size of the bucket to n, keeping up to n existing spans. -func (b *bucket) resize(n int) { - cur := b.size() - newBuffer := make([]*SpanData, n) - if cur < n { - for i := 0; i < cur; i++ { - newBuffer[i] = b.span(i) - } - b.buffer = newBuffer - b.nextIndex = cur - b.overflow = false - return - } - for i := 0; i < n; i++ { - newBuffer[i] = b.span(i + cur - n) - } - b.buffer = newBuffer - b.nextIndex = 0 - b.overflow = true -} - -// latencyBucket returns the appropriate bucket number for a given latency. -func latencyBucket(latency time.Duration) int { - i := 0 - for i < len(defaultLatencies) && latency >= defaultLatencies[i] { - i++ - } - return i -} - -// latencyBucketBounds returns the lower and upper bounds for a latency bucket -// number. 
-// -// The lower bound is inclusive, the upper bound is exclusive (except for the -// last bucket.) -func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { - if index == 0 { - return 0, defaultLatencies[index] - } - if index == len(defaultLatencies) { - return defaultLatencies[index-1], 1<<63 - 1 - } - return defaultLatencies[index-1], defaultLatencies[index] -} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go deleted file mode 100644 index e601f76f..00000000 --- a/vendor/go.opencensus.io/trace/spanstore.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "time" - - "go.opencensus.io/internal" -) - -const ( - maxBucketSize = 100000 - defaultBucketSize = 10 -) - -var ( - ssmu sync.RWMutex // protects spanStores - spanStores = make(map[string]*spanStore) -) - -// This exists purely to avoid exposing internal methods used by z-Pages externally. -type internalOnly struct{} - -func init() { - //TODO(#412): remove - internal.Trace = &internalOnly{} -} - -// ReportActiveSpans returns the active spans for the given name. -func (i internalOnly) ReportActiveSpans(name string) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for activeSpan := range s.active { - if s, ok := activeSpan.(*span); ok { - out = append(out, s.makeSpanData()) - } - } - return out -} - -// ReportSpansByError returns a sample of error spans. -// -// If code is nonzero, only spans with that status code are returned. -func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - if code != 0 { - if b, ok := s.errors[code]; ok { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } else { - for _, b := range s.errors { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } - return out -} - -// ConfigureBucketSizes sets the number of spans to keep per latency and error -// bucket for different span names. -func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { - for _, bc := range bcs { - latencyBucketSize := bc.MaxRequestsSucceeded - if latencyBucketSize < 0 { - latencyBucketSize = 0 - } - if latencyBucketSize > maxBucketSize { - latencyBucketSize = maxBucketSize - } - errorBucketSize := bc.MaxRequestsErrors - if errorBucketSize < 0 { - errorBucketSize = 0 - } - if errorBucketSize > maxBucketSize { - errorBucketSize = maxBucketSize - } - spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) - } -} - -// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. 
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { - out := make(map[string]internal.PerMethodSummary) - ssmu.RLock() - defer ssmu.RUnlock() - for name, s := range spanStores { - s.mu.Lock() - p := internal.PerMethodSummary{ - Active: len(s.active), - } - for code, b := range s.errors { - p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ - ErrorCode: code, - Size: b.size(), - }) - } - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ - MinLatency: min, - MaxLatency: max, - Size: b.size(), - }) - } - s.mu.Unlock() - out[name] = p - } - return out -} - -// ReportSpansByLatency returns a sample of successful spans. -// -// minLatency is the minimum latency of spans to be returned. -// maxLatency, if nonzero, is the maximum latency of spans to be returned. -func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - if i+1 != len(s.latency) && max <= minLatency { - continue - } - if maxLatency != 0 && maxLatency < min { - continue - } - for _, sd := range b.buffer { - if sd == nil { - break - } - if minLatency != 0 || maxLatency != 0 { - d := sd.EndTime.Sub(sd.StartTime) - if d < minLatency { - continue - } - if maxLatency != 0 && d > maxLatency { - continue - } - } - out = append(out, sd) - } - } - return out -} - -// spanStore keeps track of spans stored for a particular span name. -// -// It contains all active spans; a sample of spans for failed requests, -// categorized by error code; and a sample of spans for successful requests, -// bucketed by latency. -type spanStore struct { - mu sync.Mutex // protects everything below. - active map[SpanInterface]struct{} - errors map[int32]*bucket - latency []bucket - maxSpansPerErrorBucket int -} - -// newSpanStore creates a span store. -func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { - s := &spanStore{ - active: make(map[SpanInterface]struct{}), - latency: make([]bucket, len(defaultLatencies)+1), - maxSpansPerErrorBucket: errorBucketSize, - } - for i := range s.latency { - s.latency[i] = makeBucket(latencyBucketSize) - } - return s -} - -// spanStoreForName returns the spanStore for the given name. -// -// It returns nil if it doesn't exist. -func spanStoreForName(name string) *spanStore { - var s *spanStore - ssmu.RLock() - s, _ = spanStores[name] - ssmu.RUnlock() - return s -} - -// spanStoreForNameCreateIfNew returns the spanStore for the given name. -// -// It creates it if it didn't exist. -func spanStoreForNameCreateIfNew(name string) *spanStore { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - return s - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - return s - } - s = newSpanStore(name, defaultBucketSize, defaultBucketSize) - spanStores[name] = s - return s -} - -// spanStoreSetSize resizes the spanStore for the given name. -// -// It creates it if it didn't exist. 
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - s = newSpanStore(name, latencyBucketSize, errorBucketSize) - spanStores[name] = s -} - -func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { - s.mu.Lock() - for i := range s.latency { - s.latency[i].resize(latencyBucketSize) - } - for _, b := range s.errors { - b.resize(errorBucketSize) - } - s.maxSpansPerErrorBucket = errorBucketSize - s.mu.Unlock() -} - -// add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span SpanInterface) { - s.mu.Lock() - s.active[span] = struct{}{} - s.mu.Unlock() -} - -// finished removes a span from the active set, and adds a corresponding -// SpanData to a latency or error bucket. -func (s *spanStore) finished(span SpanInterface, sd *SpanData) { - latency := sd.EndTime.Sub(sd.StartTime) - if latency < 0 { - latency = 0 - } - code := sd.Status.Code - - s.mu.Lock() - delete(s.active, span) - if code == 0 { - s.latency[latencyBucket(latency)].add(sd) - } else { - if s.errors == nil { - s.errors = make(map[int32]*bucket) - } - if b := s.errors[code]; b != nil { - b.add(sd) - } else { - b := makeBucket(s.maxSpansPerErrorBucket) - s.errors[code] = &b - b.add(sd) - } - } - s.mu.Unlock() -} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go deleted file mode 100644 index ec60effd..00000000 --- a/vendor/go.opencensus.io/trace/status_codes.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -// Status codes for use with Span.SetStatus. These correspond to the status -// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -const ( - StatusCodeOK = 0 - StatusCodeCancelled = 1 - StatusCodeUnknown = 2 - StatusCodeInvalidArgument = 3 - StatusCodeDeadlineExceeded = 4 - StatusCodeNotFound = 5 - StatusCodeAlreadyExists = 6 - StatusCodePermissionDenied = 7 - StatusCodeResourceExhausted = 8 - StatusCodeFailedPrecondition = 9 - StatusCodeAborted = 10 - StatusCodeOutOfRange = 11 - StatusCodeUnimplemented = 12 - StatusCodeInternal = 13 - StatusCodeUnavailable = 14 - StatusCodeDataLoss = 15 - StatusCodeUnauthenticated = 16 -) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go deleted file mode 100644 index 861df9d3..00000000 --- a/vendor/go.opencensus.io/trace/trace.go +++ /dev/null @@ -1,595 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "go.opencensus.io/internal" - "go.opencensus.io/trace/tracestate" -) - -type tracer struct{} - -var _ Tracer = &tracer{} - -// Span represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type span struct { - // data contains information recorded about the span. - // - // It will be non-nil if we are exporting the span or recording events for it. - // Otherwise, data is nil, and the Span is simply a carrier for the - // SpanContext, so that the trace ID is propagated. - data *SpanData - mu sync.Mutex // protects the contents of *data (but not the pointer value.) - spanContext SpanContext - - // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry - // is removed to create room for a new entry. - lruAttributes *lruMap - - // annotations are stored in FIFO queue capped by configured limit. - annotations *evictedQueue - - // messageEvents are stored in FIFO queue capped by configured limit. - messageEvents *evictedQueue - - // links are stored in FIFO queue capped by configured limit. - links *evictedQueue - - // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. - *spanStore - endOnce sync.Once - - executionTracerTaskEnd func() // ends the execution tracer span -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.data != nil -} - -// TraceOptions contains options associated with a trace span. -type TraceOptions uint32 - -// IsSampled returns true if the span will be exported. -func (sc SpanContext) IsSampled() bool { - return sc.TraceOptions.IsSampled() -} - -// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. -func (sc *SpanContext) setIsSampled(sampled bool) { - if sampled { - sc.TraceOptions |= 1 - } else { - sc.TraceOptions &= ^TraceOptions(1) - } -} - -// IsSampled returns true if the span will be exported. -func (t TraceOptions) IsSampled() bool { - return t&1 == 1 -} - -// SpanContext contains the state that must propagate across process boundaries. -// -// SpanContext is not an implementation of context.Context. -// TODO: add reference to external Census docs for SpanContext. -type SpanContext struct { - TraceID TraceID - SpanID SpanID - TraceOptions TraceOptions - Tracestate *tracestate.Tracestate -} - -type contextKey struct{} - -// FromContext returns the Span stored in a context, or nil if there isn't one. -func (t *tracer) FromContext(ctx context.Context) *Span { - s, _ := ctx.Value(contextKey{}).(*Span) - return s -} - -// NewContext returns a new context with the given Span attached. 
-func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { - return context.WithValue(parent, contextKey{}, s) -} - -// All available span kinds. Span kind must be either one of these values. -const ( - SpanKindUnspecified = iota - SpanKindServer - SpanKindClient -) - -// StartOptions contains options concerning how a span is started. -type StartOptions struct { - // Sampler to consult for this Span. If provided, it is always consulted. - // - // If not provided, then the behavior differs based on whether - // the parent of this Span is remote, local, or there is no parent. - // In the case of a remote parent or no parent, the - // default sampler (see Config) will be consulted. Otherwise, - // when there is a non-remote parent, no new sampling decision will be made: - // we will preserve the sampling of the parent. - Sampler Sampler - - // SpanKind represents the kind of a span. If none is set, - // SpanKindUnspecified is used. - SpanKind int -} - -// StartOption apply changes to StartOptions. -type StartOption func(*StartOptions) - -// WithSpanKind makes new spans to be created with the given kind. -func WithSpanKind(spanKind int) StartOption { - return func(o *StartOptions) { - o.SpanKind = spanKind - } -} - -// WithSampler makes new spans to be be created with a custom sampler. -// Otherwise, the global sampler is used. -func WithSampler(sampler Sampler) StartOption { - return func(o *StartOptions) { - o.Sampler = sampler - } -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - var parent SpanContext - if p := t.FromContext(ctx); p != nil { - if ps, ok := p.internal.(*span); ok { - ps.addChild() - } - parent = p.SpanContext() - } - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) - - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - extSpan := NewSpan(span) - return t.NewContext(ctx, extSpan), extSpan -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. 
-func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - extSpan := NewSpan(span) - return t.NewContext(ctx, extSpan), extSpan -} - -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { - s := &span{} - s.spanContext = parent - - cfg := config.Load().(*Config) - if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { - // lazy initialization - gen.init() - } - - if !hasParent { - s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() - } - s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() - sampler := cfg.DefaultSampler - - if !hasParent || remoteParent || o.Sampler != nil { - // If this span is the child of a local span and no Sampler is set in the - // options, keep the parent's TraceOptions. - // - // Otherwise, consult the Sampler in the options if it is non-nil, otherwise - // the default sampler. - if o.Sampler != nil { - sampler = o.Sampler - } - s.spanContext.setIsSampled(sampler(SamplingParameters{ - ParentContext: parent, - TraceID: s.spanContext.TraceID, - SpanID: s.spanContext.SpanID, - Name: name, - HasRemoteParent: remoteParent}).Sample) - } - - if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { - return s - } - - s.data = &SpanData{ - SpanContext: s.spanContext, - StartTime: time.Now(), - SpanKind: o.SpanKind, - Name: name, - HasRemoteParent: remoteParent, - } - s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - s.links = newEvictedQueue(cfg.MaxLinksPerSpan) - - if hasParent { - s.data.ParentSpanID = parent.SpanID - } - if internal.LocalSpanStoreEnabled { - var ss *spanStore - ss = spanStoreForNameCreateIfNew(name) - if ss != nil { - s.spanStore = ss - ss.add(s) - } - } - - return s -} - -// End ends the span. -func (s *span) End() { - if s == nil { - return - } - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - if !s.IsRecordingEvents() { - return - } - s.endOnce.Do(func() { - exp, _ := exporters.Load().(exportersMap) - mustExport := s.spanContext.IsSampled() && len(exp) > 0 - if s.spanStore != nil || mustExport { - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if mustExport { - for e := range exp { - e.ExportSpan(sd) - } - } - } - }) -} - -// makeSpanData produces a SpanData representing the current state of the Span. -// It requires that s.data is non-nil. 
-func (s *span) makeSpanData() *SpanData { - var sd SpanData - s.mu.Lock() - sd = *s.data - if s.lruAttributes.len() > 0 { - sd.Attributes = s.lruAttributesToAttributeMap() - sd.DroppedAttributeCount = s.lruAttributes.droppedCount - } - if len(s.annotations.queue) > 0 { - sd.Annotations = s.interfaceArrayToAnnotationArray() - sd.DroppedAnnotationCount = s.annotations.droppedCount - } - if len(s.messageEvents.queue) > 0 { - sd.MessageEvents = s.interfaceArrayToMessageEventArray() - sd.DroppedMessageEventCount = s.messageEvents.droppedCount - } - if len(s.links.queue) > 0 { - sd.Links = s.interfaceArrayToLinksArray() - sd.DroppedLinkCount = s.links.droppedCount - } - s.mu.Unlock() - return &sd -} - -// SpanContext returns the SpanContext of the span. -func (s *span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.spanContext -} - -// SetName sets the name of the span, if it is recording events. -func (s *span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Name = name - s.mu.Unlock() -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Status = status - s.mu.Unlock() -} - -func (s *span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0, len(s.links.queue)) - for _, value := range s.links.queue { - linksArr = append(linksArr, value.(Link)) - } - return linksArr -} - -func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) - for _, value := range s.messageEvents.queue { - messageEventArr = append(messageEventArr, value.(MessageEvent)) - } - return messageEventArr -} - -func (s *span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0, len(s.annotations.queue)) - for _, value := range s.annotations.queue { - annotationArr = append(annotationArr, value.(Annotation)) - } - return annotationArr -} - -func (s *span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}, s.lruAttributes.len()) - for _, key := range s.lruAttributes.keys() { - value, ok := s.lruAttributes.get(key) - if ok { - keyStr := key.(string) - attributes[keyStr] = value - } - } - return attributes -} - -func (s *span) copyToCappedAttributes(attributes []Attribute) { - for _, a := range attributes { - s.lruAttributes.add(a.key, a.value) - } -} - -func (s *span) addChild() { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.ChildSpanCount++ - s.mu.Unlock() -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.copyToCappedAttributes(attributes) - s.mu.Unlock() -} - -func (s *span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var am map[string]interface{} - if len(attributes) != 0 { - am = make(map[string]interface{}, len(attributes)) - for _, attr := range attributes { - am[attr.key] = attr.value - } - } - s.mu.Lock() - s.annotations.add(Annotation{ - Time: now, - Message: str, - Attributes: am, - }) - s.mu.Unlock() -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. 
-func (s *span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, fmt.Sprintf(format, a...)) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.messageEvents.add(MessageEvent{ - Time: now, - EventType: MessageEventTypeSent, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.messageEvents.add(MessageEvent{ - Time: now, - EventType: MessageEventTypeRecv, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddLink adds a link to the span. -func (s *span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.links.add(l) - s.mu.Unlock() -} - -func (s *span) String() string { - if s == nil { - return "" - } - if s.data == nil { - return fmt.Sprintf("span %s", s.spanContext.SpanID) - } - s.mu.Lock() - str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) - s.mu.Unlock() - return str -} - -var config atomic.Value // access atomically - -func init() { - config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: &defaultIDGenerator{}, - MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, - MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, - MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, - MaxLinksPerSpan: DefaultMaxLinksPerSpan, - }) -} - -type defaultIDGenerator struct { - sync.Mutex - - // Please keep these as the first fields - // so that these 8 byte fields will be aligned on addresses - // divisible by 8, on both 32-bit and 64-bit machines when - // performing atomic increments and accesses. - // See: - // * https://github.com/census-instrumentation/opencensus-go/issues/587 - // * https://github.com/census-instrumentation/opencensus-go/issues/865 - // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG - nextSpanID uint64 - spanIDInc uint64 - - traceIDAdd [2]uint64 - traceIDRand *rand.Rand - - initOnce sync.Once -} - -// init initializes the generator on the first call to avoid consuming entropy -// unnecessarily. -func (gen *defaultIDGenerator) init() { - gen.initOnce.Do(func() { - // initialize traceID and spanID generators. 
- var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - }) -} - -// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *defaultIDGenerator) NewSpanID() [8]byte { - var id uint64 - for id == 0 { - id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) - } - var sid [8]byte - binary.LittleEndian.PutUint64(sid[:], id) - return sid -} - -// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. -// mu should be held while this function is called. -func (gen *defaultIDGenerator) NewTraceID() [16]byte { - var tid [16]byte - // Construct the trace ID from two outputs of traceIDRand, with a constant - // added to each half for additional entropy. - gen.Lock() - binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) - binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) - gen.Unlock() - return tid -} diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go deleted file mode 100644 index 9e2c3a99..00000000 --- a/vendor/go.opencensus.io/trace/trace_api.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2020, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" -) - -// DefaultTracer is the tracer used when package-level exported functions are invoked. -var DefaultTracer Tracer = &tracer{} - -// Tracer can start spans and access context functions. -type Tracer interface { - - // StartSpan starts a new child span of the current span in the context. If - // there is no span in the context, creates a new trace and span. - // - // Returned context contains the newly created span. You can use it to - // propagate the returned span in process. - StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) - - // StartSpanWithRemoteParent starts a new child span of the span from the given parent. - // - // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is - // preferred for cases where the parent is propagated via an incoming request. - // - // Returned context contains the newly created span. You can use it to - // propagate the returned span in process. - StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) - - // FromContext returns the Span stored in a context, or nil if there isn't one. - FromContext(ctx context.Context) *Span - - // NewContext returns a new context with the given Span attached. - NewContext(parent context.Context, s *Span) context.Context -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. 
You can use it to -// propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - return DefaultTracer.StartSpan(ctx, name, o...) -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) -} - -// FromContext returns the Span stored in a context, or a Span that is not -// recording events if there isn't one. -func FromContext(ctx context.Context) *Span { - return DefaultTracer.FromContext(ctx) -} - -// NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { - return DefaultTracer.NewContext(parent, s) -} - -// SpanInterface represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type SpanInterface interface { - - // IsRecordingEvents returns true if events are being recorded for this span. - // Use this check to avoid computing expensive annotations when they will never - // be used. - IsRecordingEvents() bool - - // End ends the span. - End() - - // SpanContext returns the SpanContext of the span. - SpanContext() SpanContext - - // SetName sets the name of the span, if it is recording events. - SetName(name string) - - // SetStatus sets the status of the span, if it is recording events. - SetStatus(status Status) - - // AddAttributes sets attributes in the span. - // - // Existing attributes whose keys appear in the attributes parameter are overwritten. - AddAttributes(attributes ...Attribute) - - // Annotate adds an annotation with attributes. - // Attributes can be nil. - Annotate(attributes []Attribute, str string) - - // Annotatef adds an annotation with attributes. - Annotatef(attributes []Attribute, format string, a ...interface{}) - - // AddMessageSendEvent adds a message send event to the span. - // - // messageID is an identifier for the message, which is recommended to be - // unique in this span and the same between the send event and the receive - // event (this allows to identify a message between the sender and receiver). - // For example, this could be a sequence id. - AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) - - // AddMessageReceiveEvent adds a message receive event to the span. - // - // messageID is an identifier for the message, which is recommended to be - // unique in this span and the same between the send event and the receive - // event (this allows to identify a message between the sender and receiver). - // For example, this could be a sequence id. - AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) - - // AddLink adds a link to the span. - AddLink(l Link) - - // String prints a string representation of a span. 
- String() string -} - -// NewSpan is a convenience function for creating a *Span out of a *span -func NewSpan(s SpanInterface) *Span { - return &Span{internal: s} -} - -// Span is a struct wrapper around the SpanInt interface, which allows correctly handling -// nil spans, while also allowing the SpanInterface implementation to be swapped out. -type Span struct { - internal SpanInterface -} - -// Internal returns the underlying implementation of the Span -func (s *Span) Internal() SpanInterface { - return s.internal -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *Span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.internal.IsRecordingEvents() -} - -// End ends the span. -func (s *Span) End() { - if s == nil { - return - } - s.internal.End() -} - -// SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.internal.SpanContext() -} - -// SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.internal.SetName(name) -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.internal.SetStatus(status) -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddAttributes(attributes...) -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.internal.Annotate(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.internal.Annotatef(attributes, format, a...) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) -} - -// AddLink adds a link to the span. 
-func (s *Span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.internal.AddLink(l) -} - -// String prints a string representation of a span. -func (s *Span) String() string { - if s == nil { - return "" - } - return s.internal.String() -} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go deleted file mode 100644 index b8fc1e49..00000000 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.11 -// +build go1.11 - -package trace - -import ( - "context" - t "runtime/trace" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - if !t.IsEnabled() { - // Avoid additional overhead if - // runtime/trace is not enabled. - return ctx, func() {} - } - nctx, task := t.NewTask(ctx, name) - return nctx, task.End -} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go deleted file mode 100644 index da488fc8..00000000 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.11 -// +build !go1.11 - -package trace - -import ( - "context" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - return ctx, func() {} -} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go deleted file mode 100644 index 2d6c713e..00000000 --- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tracestate implements support for the Tracestate header of the -// W3C TraceContext propagation format. 
-package tracestate - -import ( - "fmt" - "regexp" -) - -const ( - keyMaxSize = 256 - valueMaxSize = 256 - maxKeyValuePairs = 32 -) - -const ( - keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` -) - -var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) -var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) - -// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different -// vendors propagate additional information and inter-operate with their legacy Id formats. -type Tracestate struct { - entries []Entry -} - -// Entry represents one key-value pair in a list of key-value pair of Tracestate. -type Entry struct { - // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, - // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and - // forward slashes /. - Key string - - // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the - // range 0x20 to 0x7E) except comma , and =. - Value string -} - -// Entries returns a slice of Entry. -func (ts *Tracestate) Entries() []Entry { - if ts == nil { - return nil - } - return ts.entries -} - -func (ts *Tracestate) remove(key string) *Entry { - for index, entry := range ts.entries { - if entry.Key == key { - ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) - return &entry - } - } - return nil -} - -func (ts *Tracestate) add(entries []Entry) error { - for _, entry := range entries { - ts.remove(entry.Key) - } - if len(ts.entries)+len(entries) > maxKeyValuePairs { - return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", - len(entries), len(ts.entries), maxKeyValuePairs) - } - ts.entries = append(entries, ts.entries...) - return nil -} - -func isValid(entry Entry) bool { - return keyValidationRegExp.MatchString(entry.Key) && - valueValidationRegExp.MatchString(entry.Value) -} - -func containsDuplicateKey(entries ...Entry) (string, bool) { - keyMap := make(map[string]int) - for _, entry := range entries { - if _, ok := keyMap[entry.Key]; ok { - return entry.Key, true - } - keyMap[entry.Key] = 1 - } - return "", false -} - -func areEntriesValid(entries ...Entry) (*Entry, bool) { - for _, entry := range entries { - if !isValid(entry) { - return &entry, false - } - } - return nil, true -} - -// New creates a Tracestate object from a parent and/or entries (key-value pair). -// Entries from the parent are copied if present. The entries passed to this function -// are inserted in front of those copied from the parent. If an entry copied from the -// parent contains the same key as one of the entry in entries then the entry copied -// from the parent is removed. See add func. -// -// An error is returned with nil Tracestate if -// 1. one or more entry in entries is invalid. -// 2. two or more entries in the input entries have the same key. -// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. -// (duplicate entry is counted only once). 
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { - if parent == nil && len(entries) == 0 { - return nil, nil - } - if entry, ok := areEntriesValid(entries...); !ok { - return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) - } - - if key, duplicate := containsDuplicateKey(entries...); duplicate { - return nil, fmt.Errorf("contains duplicate keys (%s)", key) - } - - tracestate := Tracestate{} - - if parent != nil && len(parent.entries) > 0 { - tracestate.entries = append([]Entry{}, parent.entries...) - } - - err := tracestate.add(entries) - if err != nil { - return nil, err - } - return &tracestate, nil -} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 571116cc..00000000 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,19 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - -# Also update COVER_IGNORE_PKGS in the Makefile. -ignore: - - /internal/gen-atomicint/ - - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 2e337a0e..00000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -/bin -.DS_Store -/vendor -cover.html -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof - -# Output of fossa analyzer -/fossa diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md deleted file mode 100644 index 38f564e2..00000000 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ /dev/null @@ -1,100 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.9.0] - 2021-07-15 -### Added -- Add `Float64.Swap` to match int atomic operations. -- Add `atomic.Time` type for atomic operations on `time.Time` values. - -[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 - -## [1.8.0] - 2021-06-09 -### Added -- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. -- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. - -[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 - -## [1.7.0] - 2020-09-14 -### Added -- Support JSON serialization and deserialization of primitive atomic types. -- Support Text marshalling and unmarshalling for string atomics. - -### Changed -- Disallow incorrect comparison of atomic values in a non-atomic way. - -### Removed -- Remove dependency on `golang.org/x/{lint, tools}`. - -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 - -## [1.6.0] - 2020-02-24 -### Changed -- Drop library dependency on `golang.org/x/{lint, tools}`. 
- -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 - -## [1.5.1] - 2019-11-19 -- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together - causing `CAS` to fail even though the old value matches. - -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 - -## [1.5.0] - 2019-10-29 -### Changed -- With Go modules, only the `go.uber.org/atomic` import path is supported now. - If you need to use the old import path, please add a `replace` directive to - your `go.mod`. - -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 - -## [1.4.0] - 2019-05-01 -### Added - - Add `atomic.Error` type for atomic operations on `error` values. - -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 - -## [1.3.2] - 2018-05-02 -### Added -- Add `atomic.Duration` type for atomic operations on `time.Duration` values. - -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 - -## [1.3.1] - 2017-11-14 -### Fixed -- Revert optimization for `atomic.String.Store("")` which caused data races. - -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 - -## [1.3.0] - 2017-11-13 -### Added -- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. - -### Changed -- Optimize `atomic.String.Store("")` by avoiding an allocation. - -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 - -## [1.2.0] - 2017-04-12 -### Added -- Shadow `atomic.Value` from `sync/atomic`. - -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 - -## [1.1.0] - 2017-03-10 -### Added -- Add atomic `Float64` type. - -### Changed -- Support new `go.uber.org/atomic` import path. - -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 - -## [1.0.0] - 2016-07-18 - -- Initial release. - -[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fb..00000000 --- a/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index 46c945b3..00000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,79 +0,0 @@ -# Directory to place `go install`ed binaries into. 
-export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -GEN_ATOMICINT = $(GOBIN)/gen-atomicint -GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper -STATICCHECK = $(GOBIN)/staticcheck - -GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) - -# Also update ignore section in .codecov.yml. -COVER_IGNORE_PKGS = \ - go.uber.org/atomic/internal/gen-atomicint \ - go.uber.org/atomic/internal/gen-atomicwrapper - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) - go build -o $@ ./internal/gen-atomicwrapper - -$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) - go build -o $@ ./internal/gen-atomicint - -.PHONY: golint -golint: $(GOLINT) - $(GOLINT) ./... - -.PHONY: staticcheck -staticcheck: $(STATICCHECK) - $(STATICCHECK) ./... - -.PHONY: lint -lint: gofmt golint staticcheck generatenodirty - -# comma separated list of packages to consider for code coverage. -COVER_PKG = $(shell \ - go list -find ./... | \ - grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ - paste -sd, -) - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: generate -generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) - go generate ./... - -.PHONY: generatenodirty -generatenodirty: - @[ -z "$$(git status --porcelain)" ] || ( \ - echo "Working tree is dirty. Commit your changes first."; \ - git status; \ - exit 1 ) - @make generate - @status=$$(git status --porcelain); \ - [ -z "$$status" ] || ( \ - echo "Working tree is dirty after `make generate`:"; \ - echo "$$status"; \ - echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index 96b47a1f..00000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation - -```shell -$ go get -u go.uber.org/atomic@v1 -``` - -### Legacy Import Path - -As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way -of using this package. If you are using Go modules, this package will fail to -compile with the legacy import path path `github.com/uber-go/atomic`. - -We recommend migrating your code to the new import path but if you're unable -to do so, or if your dependencies are still using the old import path, you -will have to add a `replace` directive to your `go.mod` file downgrading the -legacy import path to an older version. - -``` -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 -``` - -You can do so automatically by running the following command. - -```shell -$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 -``` - -## Usage - -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. 
`go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status - -Stable. - ---- - -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go deleted file mode 100644 index 209df7bb..00000000 --- a/vendor/go.uber.org/atomic/bool.go +++ /dev/null @@ -1,81 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" -) - -// Bool is an atomic type-safe wrapper for bool values. -type Bool struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroBool bool - -// NewBool creates a new Bool. -func NewBool(val bool) *Bool { - x := &Bool{} - if val != _zeroBool { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped bool. -func (x *Bool) Load() bool { - return truthy(x.v.Load()) -} - -// Store atomically stores the passed bool. -func (x *Bool) Store(val bool) { - x.v.Store(boolToInt(val)) -} - -// CAS is an atomic compare-and-swap for bool values. -func (x *Bool) CAS(old, new bool) (swapped bool) { - return x.v.CAS(boolToInt(old), boolToInt(new)) -} - -// Swap atomically stores the given bool and returns the old -// value. -func (x *Bool) Swap(val bool) (old bool) { - return truthy(x.v.Swap(boolToInt(val))) -} - -// MarshalJSON encodes the wrapped bool into JSON. -func (x *Bool) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a bool from JSON. 
-func (x *Bool) UnmarshalJSON(b []byte) error { - var v bool - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go deleted file mode 100644 index a2e60e98..00000000 --- a/vendor/go.uber.org/atomic/bool_ext.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go - -func truthy(n uint32) bool { - return n == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() (old bool) { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } -} - -// String encodes the wrapped value as a string. -func (b *Bool) String() string { - return strconv.FormatBool(b.Load()) -} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go deleted file mode 100644 index ae7390ee..00000000 --- a/vendor/go.uber.org/atomic/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go deleted file mode 100644 index 207594f5..00000000 --- a/vendor/go.uber.org/atomic/duration.go +++ /dev/null @@ -1,82 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "time" -) - -// Duration is an atomic type-safe wrapper for time.Duration values. -type Duration struct { - _ nocmp // disallow non-atomic comparison - - v Int64 -} - -var _zeroDuration time.Duration - -// NewDuration creates a new Duration. -func NewDuration(val time.Duration) *Duration { - x := &Duration{} - if val != _zeroDuration { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Duration. -func (x *Duration) Load() time.Duration { - return time.Duration(x.v.Load()) -} - -// Store atomically stores the passed time.Duration. -func (x *Duration) Store(val time.Duration) { - x.v.Store(int64(val)) -} - -// CAS is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.v.CAS(int64(old), int64(new)) -} - -// Swap atomically stores the given time.Duration and returns the old -// value. -func (x *Duration) Swap(val time.Duration) (old time.Duration) { - return time.Duration(x.v.Swap(int64(val))) -} - -// MarshalJSON encodes the wrapped time.Duration into JSON. -func (x *Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a time.Duration from JSON. -func (x *Duration) UnmarshalJSON(b []byte) error { - var v time.Duration - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go deleted file mode 100644 index 4c18b0a9..00000000 --- a/vendor/go.uber.org/atomic/duration_ext.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(delta time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(delta))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(delta time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(delta))) -} - -// String encodes the wrapped value as a string. -func (d *Duration) String() string { - return d.Load().String() -} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go deleted file mode 100644 index 3be19c35..00000000 --- a/vendor/go.uber.org/atomic/error.go +++ /dev/null @@ -1,51 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// Error is an atomic type-safe wrapper for error values. -type Error struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroError error - -// NewError creates a new Error. 
-func NewError(val error) *Error { - x := &Error{} - if val != _zeroError { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped error. -func (x *Error) Load() error { - return unpackError(x.v.Load()) -} - -// Store atomically stores the passed error. -func (x *Error) Store(val error) { - x.v.Store(packError(val)) -} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go deleted file mode 100644 index ffe0be21..00000000 --- a/vendor/go.uber.org/atomic/error_ext.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// atomic.Value panics on nil inputs, or if the underlying type changes. -// Stabilize by always storing a custom struct that we control. - -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go - -type packedError struct{ Value error } - -func packError(v error) interface{} { - return packedError{v} -} - -func unpackError(v interface{}) error { - if err, ok := v.(packedError); ok { - return err.Value - } - return nil -} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go deleted file mode 100644 index 8a136718..00000000 --- a/vendor/go.uber.org/atomic/float64.go +++ /dev/null @@ -1,77 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float64 is an atomic type-safe wrapper for float64 values. -type Float64 struct { - _ nocmp // disallow non-atomic comparison - - v Uint64 -} - -var _zeroFloat64 float64 - -// NewFloat64 creates a new Float64. -func NewFloat64(val float64) *Float64 { - x := &Float64{} - if val != _zeroFloat64 { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped float64. -func (x *Float64) Load() float64 { - return math.Float64frombits(x.v.Load()) -} - -// Store atomically stores the passed float64. -func (x *Float64) Store(val float64) { - x.v.Store(math.Float64bits(val)) -} - -// Swap atomically stores the given float64 and returns the old -// value. -func (x *Float64) Swap(val float64) (old float64) { - return math.Float64frombits(x.v.Swap(math.Float64bits(val))) -} - -// MarshalJSON encodes the wrapped float64 into JSON. -func (x *Float64) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float64 from JSON. -func (x *Float64) UnmarshalJSON(b []byte) error { - var v float64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go deleted file mode 100644 index df36b010..00000000 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "math" - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(delta float64) float64 { - for { - old := f.Load() - new := old + delta - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(delta float64) float64 { - return f.Add(-delta) -} - -// CAS is an atomic compare-and-swap for float64 values. -// -// Note: CAS handles NaN incorrectly. 
NaN != NaN using Go's inbuilt operators -// but CAS allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CAS loops from blocking forever, e.g., -// -// for { -// old := atom.Load() -// new = f(old) -// if atom.CAS(old, new) { -// break -// } -// } -// -// If CAS did not match NaN to match, then the above would loop forever. -func (f *Float64) CAS(old, new float64) (swapped bool) { - return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) -} - -// String encodes the wrapped value as a string. -func (f *Float64) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(f.Load(), 'g', -1, 64) -} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go deleted file mode 100644 index 1e9ef4f8..00000000 --- a/vendor/go.uber.org/atomic/gen.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go -//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go -//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go -//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go -//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go deleted file mode 100644 index 640ea36a..00000000 --- a/vendor/go.uber.org/atomic/int32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int32 is an atomic wrapper around int32. -type Int32 struct { - _ nocmp // disallow non-atomic comparison - - v int32 -} - -// NewInt32 creates a new Int32. -func NewInt32(val int32) *Int32 { - return &Int32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(delta int32) int32 { - return atomic.AddInt32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(delta int32) int32 { - return atomic.AddInt32(&i.v, -delta) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) (swapped bool) { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(val int32) { - atomic.StoreInt32(&i.v, val) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(val int32) (old int32) { - return atomic.SwapInt32(&i.v, val) -} - -// MarshalJSON encodes the wrapped int32 into JSON. -func (i *Int32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int32. -func (i *Int32) UnmarshalJSON(b []byte) error { - var v int32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int32) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go deleted file mode 100644 index 9ab66b98..00000000 --- a/vendor/go.uber.org/atomic/int64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int64 is an atomic wrapper around int64. -type Int64 struct { - _ nocmp // disallow non-atomic comparison - - v int64 -} - -// NewInt64 creates a new Int64. -func NewInt64(val int64) *Int64 { - return &Int64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(delta int64) int64 { - return atomic.AddInt64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(delta int64) int64 { - return atomic.AddInt64(&i.v, -delta) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) (swapped bool) { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(val int64) { - atomic.StoreInt64(&i.v, val) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(val int64) (old int64) { - return atomic.SwapInt64(&i.v, val) -} - -// MarshalJSON encodes the wrapped int64 into JSON. -func (i *Int64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int64. -func (i *Int64) UnmarshalJSON(b []byte) error { - var v int64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int64) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go deleted file mode 100644 index a8201cb4..00000000 --- a/vendor/go.uber.org/atomic/nocmp.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package atomic - -// nocmp is an uncomparable struct. Embed this inside another struct to make -// it uncomparable. -// -// type Foo struct { -// nocmp -// // ... -// } -// -// This DOES NOT: -// -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs -type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index 80df93d0..00000000 --- a/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,54 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper for string values. -type String struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroString string - -// NewString creates a new String. -func NewString(val string) *String { - x := &String{} - if val != _zeroString { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped string. -func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString -} - -// Store atomically stores the passed string. -func (x *String) Store(val string) { - x.v.Store(val) -} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go deleted file mode 100644 index 83d92eda..00000000 --- a/vendor/go.uber.org/atomic/string_ext.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go -// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which -// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 - -// String returns the wrapped value. -func (s *String) String() string { - return s.Load() -} - -// MarshalText encodes the wrapped string into a textual form. -// -// This makes it encodable as JSON, YAML, XML, and more. -func (s *String) MarshalText() ([]byte, error) { - return []byte(s.Load()), nil -} - -// UnmarshalText decodes text and replaces the wrapped string with it. -// -// This makes it decodable from JSON, YAML, XML, and more. -func (s *String) UnmarshalText(b []byte) error { - s.Store(string(b)) - return nil -} diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go deleted file mode 100644 index 33460fc3..00000000 --- a/vendor/go.uber.org/atomic/time.go +++ /dev/null @@ -1,55 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "time" -) - -// Time is an atomic type-safe wrapper for time.Time values. -type Time struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroTime time.Time - -// NewTime creates a new Time. -func NewTime(val time.Time) *Time { - x := &Time{} - if val != _zeroTime { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Time. -func (x *Time) Load() time.Time { - return unpackTime(x.v.Load()) -} - -// Store atomically stores the passed time.Time. -func (x *Time) Store(val time.Time) { - x.v.Store(packTime(val)) -} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go deleted file mode 100644 index 1e3dc978..00000000 --- a/vendor/go.uber.org/atomic/time_ext.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go - -func packTime(t time.Time) interface{} { - return t -} - -func unpackTime(v interface{}) time.Time { - if t, ok := v.(time.Time); ok { - return t - } - return time.Time{} -} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go deleted file mode 100644 index 7859a9cc..00000000 --- a/vendor/go.uber.org/atomic/uint32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint32 is an atomic wrapper around uint32. -type Uint32 struct { - _ nocmp // disallow non-atomic comparison - - v uint32 -} - -// NewUint32 creates a new Uint32. -func NewUint32(val uint32) *Uint32 { - return &Uint32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(delta uint32) uint32 { - return atomic.AddUint32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. 
-func (i *Uint32) Sub(delta uint32) uint32 { - return atomic.AddUint32(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) (swapped bool) { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(val uint32) { - atomic.StoreUint32(&i.v, val) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(val uint32) (old uint32) { - return atomic.SwapUint32(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint32 into JSON. -func (i *Uint32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint32. -func (i *Uint32) UnmarshalJSON(b []byte) error { - var v uint32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint32) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go deleted file mode 100644 index 2f2a7db6..00000000 --- a/vendor/go.uber.org/atomic/uint64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint64 is an atomic wrapper around uint64. -type Uint64 struct { - _ nocmp // disallow non-atomic comparison - - v uint64 -} - -// NewUint64 creates a new Uint64. -func NewUint64(val uint64) *Uint64 { - return &Uint64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(delta uint64) uint64 { - return atomic.AddUint64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. 
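CAS above is the usual building block for lock-free read-modify-write loops; a small sketch, again assuming go.uber.org/atomic is importable (saturatingAdd is a hypothetical helper, not part of the package):

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// saturatingAdd adds delta to v but never exceeds limit, retrying with CAS
// until no other goroutine has modified the value in between.
func saturatingAdd(v *atomic.Uint32, delta, limit uint32) uint32 {
	for {
		old := v.Load()
		next := old + delta
		if next > limit || next < old { // clamp, including on wraparound
			next = limit
		}
		if v.CAS(old, next) {
			return next
		}
		// Lost the race; reload and try again.
	}
}

func main() {
	var inFlight atomic.Uint32
	fmt.Println(saturatingAdd(&inFlight, 3, 8)) // 3
	fmt.Println(saturatingAdd(&inFlight, 7, 8)) // 8
}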
-func (i *Uint64) Sub(delta uint64) uint64 { - return atomic.AddUint64(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) (swapped bool) { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(val uint64) { - atomic.StoreUint64(&i.v, val) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(val uint64) (old uint64) { - return atomic.SwapUint64(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint64 into JSON. -func (i *Uint64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint64. -func (i *Uint64) UnmarshalJSON(b []byte) error { - var v uint64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint64) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go deleted file mode 100644 index ecf7a772..00000000 --- a/vendor/go.uber.org/atomic/uintptr.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uintptr is an atomic wrapper around uintptr. -type Uintptr struct { - _ nocmp // disallow non-atomic comparison - - v uintptr -} - -// NewUintptr creates a new Uintptr. -func NewUintptr(val uintptr) *Uintptr { - return &Uintptr{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uintptr) Load() uintptr { - return atomic.LoadUintptr(&i.v) -} - -// Add atomically adds to the wrapped uintptr and returns the new value. -func (i *Uintptr) Add(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uintptr and returns the new value. 
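The Sub implementations above lean on unsigned two's-complement arithmetic: for an unsigned delta, ^(delta - 1) has the same bit pattern as -delta, so a single atomic Add serves for both increment and decrement. A quick stdlib-only check of that identity:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v uint64 = 100
	delta := uint64(7)

	// Subtract 7 by adding its two's complement, exactly as Sub does above.
	atomic.AddUint64(&v, ^(delta - 1))
	fmt.Println(v) // 93

	// The identity itself: ^(delta-1) and -delta are the same unsigned value.
	fmt.Println(^(delta-1) == -delta) // true
}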
-func (i *Uintptr) Sub(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uintptr and returns the new value. -func (i *Uintptr) Inc() uintptr { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uintptr and returns the new value. -func (i *Uintptr) Dec() uintptr { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { - return atomic.CompareAndSwapUintptr(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uintptr) Store(val uintptr) { - atomic.StoreUintptr(&i.v, val) -} - -// Swap atomically swaps the wrapped uintptr and returns the old value. -func (i *Uintptr) Swap(val uintptr) (old uintptr) { - return atomic.SwapUintptr(&i.v, val) -} - -// MarshalJSON encodes the wrapped uintptr into JSON. -func (i *Uintptr) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uintptr. -func (i *Uintptr) UnmarshalJSON(b []byte) error { - var v uintptr - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uintptr) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go deleted file mode 100644 index 169f793d..00000000 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "sync/atomic" - "unsafe" -) - -// UnsafePointer is an atomic wrapper around unsafe.Pointer. -type UnsafePointer struct { - _ nocmp // disallow non-atomic comparison - - v unsafe.Pointer -} - -// NewUnsafePointer creates a new UnsafePointer. -func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { - return &UnsafePointer{v: val} -} - -// Load atomically loads the wrapped value. -func (p *UnsafePointer) Load() unsafe.Pointer { - return atomic.LoadPointer(&p.v) -} - -// Store atomically stores the passed value. -func (p *UnsafePointer) Store(val unsafe.Pointer) { - atomic.StorePointer(&p.v, val) -} - -// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. 
-func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { - return atomic.SwapPointer(&p.v, val) -} - -// CAS is an atomic compare-and-swap. -func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { - return atomic.CompareAndSwapPointer(&p.v, old, new) -} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go deleted file mode 100644 index 671f3a38..00000000 --- a/vendor/go.uber.org/atomic/value.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "sync/atomic" - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct { - atomic.Value - - _ nocmp // disallow non-atomic comparison -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go deleted file mode 100644 index 2492f796..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - encoding_asn1 "encoding/asn1" - "fmt" - "math/big" - "reflect" - "time" - - "golang.org/x/crypto/cryptobyte/asn1" -) - -// This file contains ASN.1-related methods for String and Builder. - -// Builder - -// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Int64(v int64) { - b.addASN1Signed(asn1.INTEGER, v) -} - -// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the -// given tag. -func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { - b.addASN1Signed(tag, v) -} - -// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. -func (b *Builder) AddASN1Enum(v int64) { - b.addASN1Signed(asn1.ENUM, v) -} - -func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { - b.AddASN1(tag, func(c *Builder) { - length := 1 - for i := v; i >= 0x80 || i < -0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. 
-func (b *Builder) AddASN1Uint64(v uint64) { - b.AddASN1(asn1.INTEGER, func(c *Builder) { - length := 1 - for i := v; i >= 0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1BigInt(n *big.Int) { - if b.err != nil { - return - } - - b.AddASN1(asn1.INTEGER, func(c *Builder) { - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement form. So we - // invert and subtract 1. If the most-significant-bit isn't set then - // we'll need to pad the beginning with 0xff in order to keep the number - // negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - c.add(0xff) - } - c.add(bytes...) - } else if n.Sign() == 0 { - c.add(0) - } else { - bytes := n.Bytes() - if bytes[0]&0x80 != 0 { - c.add(0) - } - c.add(bytes...) - } - }) -} - -// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. -func (b *Builder) AddASN1OctetString(bytes []byte) { - b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { - c.AddBytes(bytes) - }) -} - -const generalizedTimeFormatStr = "20060102150405Z0700" - -// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. -func (b *Builder) AddASN1GeneralizedTime(t time.Time) { - if t.Year() < 0 || t.Year() > 9999 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) - return - } - b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { - c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) - }) -} - -// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. -func (b *Builder) AddASN1UTCTime(t time.Time) { - b.AddASN1(asn1.UTCTime, func(c *Builder) { - // As utilized by the X.509 profile, UTCTime can only - // represent the years 1950 through 2049. - if t.Year() < 1950 || t.Year() >= 2050 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) - return - } - c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) - }) -} - -// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not -// support BIT STRINGs that are not a whole number of bytes. 
-func (b *Builder) AddASN1BitString(data []byte) { - b.AddASN1(asn1.BIT_STRING, func(b *Builder) { - b.AddUint8(0) - b.AddBytes(data) - }) -} - -func (b *Builder) addBase128Int(n int64) { - var length int - if n == 0 { - length = 1 - } else { - for i := n; i > 0; i >>= 7 { - length++ - } - } - - for i := length - 1; i >= 0; i-- { - o := byte(n >> uint(i*7)) - o &= 0x7f - if i != 0 { - o |= 0x80 - } - - b.add(o) - } -} - -func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { - if len(oid) < 2 { - return false - } - - if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { - return false - } - - for _, v := range oid { - if v < 0 { - return false - } - } - - return true -} - -func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { - b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { - if !isValidOID(oid) { - b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) - return - } - - b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) - for _, v := range oid[2:] { - b.addBase128Int(int64(v)) - } - }) -} - -func (b *Builder) AddASN1Boolean(v bool) { - b.AddASN1(asn1.BOOLEAN, func(b *Builder) { - if v { - b.AddUint8(0xff) - } else { - b.AddUint8(0) - } - }) -} - -func (b *Builder) AddASN1NULL() { - b.add(uint8(asn1.NULL), 0) -} - -// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if -// successful or records an error if one occurred. -func (b *Builder) MarshalASN1(v interface{}) { - // NOTE(martinkr): This is somewhat of a hack to allow propagation of - // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a - // value embedded into a struct, its tag information is lost. - if b.err != nil { - return - } - bytes, err := encoding_asn1.Marshal(v) - if err != nil { - b.err = err - return - } - b.AddBytes(bytes) -} - -// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. -// Tags greater than 30 are not supported and result in an error (i.e. -// low-tag-number form only). The child builder passed to the -// BuilderContinuation can be used to build the content of the ASN.1 object. -func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { - if b.err != nil { - return - } - // Identifiers with the low five bits set indicate high-tag-number format - // (two or more octets), which we don't support. - if tag&0x1f == 0x1f { - b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) - return - } - b.AddUint8(uint8(tag)) - b.addLengthPrefixed(1, true, f) -} - -// String - -// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean -// representation into out and advances. It reports whether the read -// was successful. -func (s *String) ReadASN1Boolean(out *bool) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { - return false - } - - switch bytes[0] { - case 0: - *out = false - case 0xff: - *out = true - default: - return false - } - - return true -} - -// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does -// not point to an integer, to a big.Int, or to a []byte it panics. Only -// positive and zero values can be decoded into []byte, and they are returned as -// big-endian binary values that share memory with s. Positive values will have -// no leading zeroes, and zero will be returned as a single zero byte. -// ReadASN1Integer reports whether the read was successful. 
-func (s *String) ReadASN1Integer(out interface{}) bool { - switch out := out.(type) { - case *int, *int8, *int16, *int32, *int64: - var i int64 - if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { - return false - } - reflect.ValueOf(out).Elem().SetInt(i) - return true - case *uint, *uint8, *uint16, *uint32, *uint64: - var u uint64 - if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { - return false - } - reflect.ValueOf(out).Elem().SetUint(u) - return true - case *big.Int: - return s.readASN1BigInt(out) - case *[]byte: - return s.readASN1Bytes(out) - default: - panic("out does not point to an integer type") - } -} - -func checkASN1Integer(bytes []byte) bool { - if len(bytes) == 0 { - // An INTEGER is encoded with at least one octet. - return false - } - if len(bytes) == 1 { - return true - } - if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { - // Value is not minimally encoded. - return false - } - return true -} - -var bigOne = big.NewInt(1) - -func (s *String) readASN1BigInt(out *big.Int) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - // Negative number. - neg := make([]byte, len(bytes)) - for i, b := range bytes { - neg[i] = ^b - } - out.SetBytes(neg) - out.Add(out, bigOne) - out.Neg(out) - } else { - out.SetBytes(bytes) - } - return true -} - -func (s *String) readASN1Bytes(out *[]byte) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - return false - } - for len(bytes) > 1 && bytes[0] == 0 { - bytes = bytes[1:] - } - *out = bytes - return true -} - -func (s *String) readASN1Int64(out *int64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { - return false - } - return true -} - -func asn1Signed(out *int64, n []byte) bool { - length := len(n) - if length > 8 { - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= int64(n[i]) - } - // Shift up and down in order to sign extend the result. - *out <<= 64 - uint8(length)*8 - *out >>= 64 - uint8(length)*8 - return true -} - -func (s *String) readASN1Uint64(out *uint64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { - return false - } - return true -} - -func asn1Unsigned(out *uint64, n []byte) bool { - length := len(n) - if length > 9 || length == 9 && n[0] != 0 { - // Too large for uint64. - return false - } - if n[0]&0x80 != 0 { - // Negative number. - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= uint64(n[i]) - } - return true -} - -// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out -// and advances. It reports whether the read was successful and resulted in a -// value that can be represented in an int64. -func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { - var bytes String - return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) -} - -// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports -// whether the read was successful. 
-func (s *String) ReadASN1Enum(out *int) bool { - var bytes String - var i int64 - if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { - return false - } - if int64(int(i)) != i { - return false - } - *out = int(i) - return true -} - -func (s *String) readBase128Int(out *int) bool { - ret := 0 - for i := 0; len(*s) > 0; i++ { - if i == 5 { - return false - } - // Avoid overflowing int on a 32-bit platform. - // We don't want different behavior based on the architecture. - if ret >= 1<<(31-7) { - return false - } - ret <<= 7 - b := s.read(1)[0] - - // ITU-T X.690, section 8.19.2: - // The subidentifier shall be encoded in the fewest possible octets, - // that is, the leading octet of the subidentifier shall not have the value 0x80. - if i == 0 && b == 0x80 { - return false - } - - ret |= int(b & 0x7f) - if b&0x80 == 0 { - *out = ret - return true - } - } - return false // truncated -} - -// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { - return false - } - - // In the worst case, we get two elements from the first byte (which is - // encoded differently) and then every varint is a single byte long. - components := make([]int, len(bytes)+1) - - // The first varint is 40*value1 + value2: - // According to this packing, value1 can take the values 0, 1 and 2 only. - // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, - // then there are no restrictions on value2. - var v int - if !bytes.readBase128Int(&v) { - return false - } - if v < 80 { - components[0] = v / 40 - components[1] = v % 40 - } else { - components[0] = 2 - components[1] = v - 80 - } - - i := 2 - for ; len(bytes) > 0; i++ { - if !bytes.readBase128Int(&v) { - return false - } - components[i] = v - } - *out = components[:i] - return true -} - -// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { - return false - } - t := string(bytes) - res, err := time.Parse(generalizedTimeFormatStr, t) - if err != nil { - return false - } - if serialized := res.Format(generalizedTimeFormatStr); serialized != t { - return false - } - *out = res - return true -} - -const defaultUTCTimeFormatStr = "060102150405Z0700" - -// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. -// It reports whether the read was successful. -func (s *String) ReadASN1UTCTime(out *time.Time) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.UTCTime) { - return false - } - t := string(bytes) - - formatStr := defaultUTCTimeFormatStr - var err error - res, err := time.Parse(formatStr, t) - if err != nil { - // Fallback to minute precision if we can't parse second - // precision. If we are following X.509 or X.690 we shouldn't - // support this, but we do. - formatStr = "0601021504Z0700" - res, err = time.Parse(formatStr, t) - } - if err != nil { - return false - } - - if serialized := res.Format(formatStr); serialized != t { - return false - } - - if res.Year() >= 2050 { - // UTCTime interprets the low order digits 50-99 as 1950-99. - // This only applies to its use in the X.509 profile. 
- // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 - res = res.AddDate(-100, 0, 0) - } - *out = res - return true -} - -// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. -// It reports whether the read was successful. -func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || - len(bytes)*8/8 != len(bytes) { - return false - } - - paddingBits := bytes[0] - bytes = bytes[1:] - if paddingBits > 7 || - len(bytes) == 0 && paddingBits != 0 || - len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { - return false - } - - lenBytes := String((*s)[2 : 2+lenLen]) - if !lenBytes.readUnsigned(&len32, int(lenLen)) { - return false - } - - // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length - // with the minimum number of octets. - if len32 < 128 { - // Length should have used short-form encoding. - return false - } - if len32>>((lenLen-1)*8) == 0 { - // Leading octet is 0. Length should have been at least one byte shorter. - return false - } - - headerLen = 2 + uint32(lenLen) - if headerLen+len32 < len32 { - // Overflow. - return false - } - length = headerLen + len32 - } - - if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { - return false - } - if skipHeader && !out.Skip(int(headerLen)) { - panic("cryptobyte: internal error") - } - - return true -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go deleted file mode 100644 index cda8e3ed..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package asn1 contains supporting types for parsing and building ASN.1 -// messages with the cryptobyte package. -package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" - -// Tag represents an ASN.1 identifier octet, consisting of a tag number -// (indicating a type) and class (such as context-specific or constructed). -// -// Methods in the cryptobyte package only support the low-tag-number form, i.e. -// a single identifier octet with bits 7-8 encoding the class and bits 1-6 -// encoding the tag number. -type Tag uint8 - -const ( - classConstructed = 0x20 - classContextSpecific = 0x80 -) - -// Constructed returns t with the constructed class bit set. -func (t Tag) Constructed() Tag { return t | classConstructed } - -// ContextSpecific returns t with the context-specific class bit set. -func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } - -// The following is a list of standard tag and class combinations. -const ( - BOOLEAN = Tag(1) - INTEGER = Tag(2) - BIT_STRING = Tag(3) - OCTET_STRING = Tag(4) - NULL = Tag(5) - OBJECT_IDENTIFIER = Tag(6) - ENUM = Tag(10) - UTF8String = Tag(12) - SEQUENCE = Tag(16 | classConstructed) - SET = Tag(17 | classConstructed) - PrintableString = Tag(19) - T61String = Tag(20) - IA5String = Tag(22) - UTCTime = Tag(23) - GeneralizedTime = Tag(24) - GeneralString = Tag(27) -) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go deleted file mode 100644 index cf254f5f..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/builder.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
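A minimal parsing sketch using the String readers above; the DER bytes are written out by hand for illustration and encode SEQUENCE { INTEGER 42, BOOLEAN TRUE }:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// 30 06 is the SEQUENCE header; 02 01 2a is INTEGER 42; 01 01 ff is BOOLEAN TRUE.
	der := []byte{0x30, 0x06, 0x02, 0x01, 0x2a, 0x01, 0x01, 0xff}

	input := cryptobyte.String(der)
	var seq cryptobyte.String
	var n int64
	var flag bool

	// ReadASN1 strips the SEQUENCE header, then the integer and boolean
	// readers shown above consume the two inner elements.
	ok := input.ReadASN1(&seq, asn1.SEQUENCE) &&
		seq.ReadASN1Integer(&n) &&
		seq.ReadASN1Boolean(&flag) &&
		seq.Empty() &&
		input.Empty()

	fmt.Println(ok, n, flag) // true 42 true
}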
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - "errors" - "fmt" -) - -// A Builder builds byte strings from fixed-length and length-prefixed values. -// Builders either allocate space as needed, or are ‘fixed’, which means that -// they write into a given buffer and produce an error if it's exhausted. -// -// The zero value is a usable Builder that allocates space as needed. -// -// Simple values are marshaled and appended to a Builder using methods on the -// Builder. Length-prefixed values are marshaled by providing a -// BuilderContinuation, which is a function that writes the inner contents of -// the value to a given Builder. See the documentation for BuilderContinuation -// for details. -type Builder struct { - err error - result []byte - fixedSize bool - child *Builder - offset int - pendingLenLen int - pendingIsASN1 bool - inContinuation *bool -} - -// NewBuilder creates a Builder that appends its output to the given buffer. -// Like append(), the slice will be reallocated if its capacity is exceeded. -// Use Bytes to get the final buffer. -func NewBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - } -} - -// NewFixedBuilder creates a Builder that appends its output into the given -// buffer. This builder does not reallocate the output buffer. Writes that -// would exceed the buffer's capacity are treated as an error. -func NewFixedBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - fixedSize: true, - } -} - -// SetError sets the value to be returned as the error from Bytes. Writes -// performed after calling SetError are ignored. -func (b *Builder) SetError(err error) { - b.err = err -} - -// Bytes returns the bytes written by the builder or an error if one has -// occurred during building. -func (b *Builder) Bytes() ([]byte, error) { - if b.err != nil { - return nil, b.err - } - return b.result[b.offset:], nil -} - -// BytesOrPanic returns the bytes written by the builder or panics if an error -// has occurred during building. -func (b *Builder) BytesOrPanic() []byte { - if b.err != nil { - panic(b.err) - } - return b.result[b.offset:] -} - -// AddUint8 appends an 8-bit value to the byte string. -func (b *Builder) AddUint8(v uint8) { - b.add(byte(v)) -} - -// AddUint16 appends a big-endian, 16-bit value to the byte string. -func (b *Builder) AddUint16(v uint16) { - b.add(byte(v>>8), byte(v)) -} - -// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest -// byte of the 32-bit input value is silently truncated. -func (b *Builder) AddUint24(v uint32) { - b.add(byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint32 appends a big-endian, 32-bit value to the byte string. -func (b *Builder) AddUint32(v uint32) { - b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint48 appends a big-endian, 48-bit value to the byte string. -func (b *Builder) AddUint48(v uint64) { - b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint64 appends a big-endian, 64-bit value to the byte string. -func (b *Builder) AddUint64(v uint64) { - b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddBytes appends a sequence of bytes to the byte string. -func (b *Builder) AddBytes(v []byte) { - b.add(v...) -} - -// BuilderContinuation is a continuation-passing interface for building -// length-prefixed byte sequences. 
Builder methods for length-prefixed -// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation -// supplied to them. The child builder passed to the continuation can be used -// to build the content of the length-prefixed sequence. For example: -// -// parent := cryptobyte.NewBuilder() -// parent.AddUint8LengthPrefixed(func (child *Builder) { -// child.AddUint8(42) -// child.AddUint8LengthPrefixed(func (grandchild *Builder) { -// grandchild.AddUint8(5) -// }) -// }) -// -// It is an error to write more bytes to the child than allowed by the reserved -// length prefix. After the continuation returns, the child must be considered -// invalid, i.e. users must not store any copies or references of the child -// that outlive the continuation. -// -// If the continuation panics with a value of type BuildError then the inner -// error will be returned as the error from Bytes. If the child panics -// otherwise then Bytes will repanic with the same value. -type BuilderContinuation func(child *Builder) - -// BuildError wraps an error. If a BuilderContinuation panics with this value, -// the panic will be recovered and the inner error will be returned from -// Builder.Bytes. -type BuildError struct { - Err error -} - -// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. -func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(1, false, f) -} - -// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. -func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(2, false, f) -} - -// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. -func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(3, false, f) -} - -// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. -func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(4, false, f) -} - -func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { - if !*b.inContinuation { - *b.inContinuation = true - - defer func() { - *b.inContinuation = false - - r := recover() - if r == nil { - return - } - - if buildError, ok := r.(BuildError); ok { - b.err = buildError.Err - } else { - panic(r) - } - }() - } - - f(arg) -} - -func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { - // Subsequent writes can be ignored if the builder has encountered an error. - if b.err != nil { - return - } - - offset := len(b.result) - b.add(make([]byte, lenLen)...) - - if b.inContinuation == nil { - b.inContinuation = new(bool) - } - - b.child = &Builder{ - result: b.result, - fixedSize: b.fixedSize, - offset: offset, - pendingLenLen: lenLen, - pendingIsASN1: isASN1, - inContinuation: b.inContinuation, - } - - b.callContinuation(f, b.child) - b.flushChild() - if b.child != nil { - panic("cryptobyte: internal error") - } -} - -func (b *Builder) flushChild() { - if b.child == nil { - return - } - b.child.flushChild() - child := b.child - b.child = nil - - if child.err != nil { - b.err = child.err - return - } - - length := len(child.result) - child.pendingLenLen - child.offset - - if length < 0 { - panic("cryptobyte: internal error") // result unexpectedly shrunk - } - - if child.pendingIsASN1 { - // For ASN.1, we reserved a single byte for the length. If that turned out - // to be incorrect, we have to move the contents along in order to make - // space. 
- if child.pendingLenLen != 1 { - panic("cryptobyte: internal error") - } - var lenLen, lenByte uint8 - if int64(length) > 0xfffffffe { - b.err = errors.New("pending ASN.1 child too long") - return - } else if length > 0xffffff { - lenLen = 5 - lenByte = 0x80 | 4 - } else if length > 0xffff { - lenLen = 4 - lenByte = 0x80 | 3 - } else if length > 0xff { - lenLen = 3 - lenByte = 0x80 | 2 - } else if length > 0x7f { - lenLen = 2 - lenByte = 0x80 | 1 - } else { - lenLen = 1 - lenByte = uint8(length) - length = 0 - } - - // Insert the initial length byte, make space for successive length bytes, - // and adjust the offset. - child.result[child.offset] = lenByte - extraBytes := int(lenLen - 1) - if extraBytes != 0 { - child.add(make([]byte, extraBytes)...) - childStart := child.offset + child.pendingLenLen - copy(child.result[childStart+extraBytes:], child.result[childStart:]) - } - child.offset++ - child.pendingLenLen = extraBytes - } - - l := length - for i := child.pendingLenLen - 1; i >= 0; i-- { - child.result[child.offset+i] = uint8(l) - l >>= 8 - } - if l != 0 { - b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) - return - } - - if b.fixedSize && &b.result[0] != &child.result[0] { - panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") - } - - b.result = child.result -} - -func (b *Builder) add(bytes ...byte) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted write while child is pending") - } - if len(b.result)+len(bytes) < len(bytes) { - b.err = errors.New("cryptobyte: length overflow") - } - if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { - b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") - return - } - b.result = append(b.result, bytes...) -} - -// Unwrite rolls back non-negative n bytes written directly to the Builder. -// An attempt by a child builder passed to a continuation to unwrite bytes -// from its parent will panic. -func (b *Builder) Unwrite(n int) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted unwrite while child is pending") - } - length := len(b.result) - b.pendingLenLen - b.offset - if length < 0 { - panic("cryptobyte: internal error") - } - if n < 0 { - panic("cryptobyte: attempted to unwrite negative number of bytes") - } - if n > length { - panic("cryptobyte: attempted to unwrite more than was written") - } - b.result = b.result[:len(b.result)-n] -} - -// A MarshalingValue marshals itself into a Builder. -type MarshalingValue interface { - // Marshal is called by Builder.AddValue. It receives a pointer to a builder - // to marshal itself into. It may return an error that occurred during - // marshaling, such as unset or invalid values. - Marshal(b *Builder) error -} - -// AddValue calls Marshal on v, passing a pointer to the builder to append to. -// If Marshal returns an error, it is set on the Builder so that subsequent -// appends don't have an effect. -func (b *Builder) AddValue(v MarshalingValue) { - err := v.Marshal(b) - if err != nil { - b.err = err - } -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go deleted file mode 100644 index 10692a8a..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
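A short sketch of the continuation style described above, where the length prefix is reserved first and patched in after the child builder runs; the message layout here is made up for illustration:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder // the zero value allocates as needed
	b.AddUint8(1)            // hypothetical message type
	b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
		body.AddBytes([]byte("hello"))
		body.AddUint8LengthPrefixed(func(inner *cryptobyte.Builder) {
			inner.AddUint8(0xff)
		})
	})

	out, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // 01 00 07 68 65 6c 6c 6f 01 ff
}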
- -// Package cryptobyte contains types that help with parsing and constructing -// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage -// contains useful ASN.1 constants.) -// -// The String type is for parsing. It wraps a []byte slice and provides helper -// functions for consuming structures, value by value. -// -// The Builder type is for constructing messages. It providers helper functions -// for appending values and also for appending length-prefixed submessages – -// without having to worry about calculating the length prefix ahead of time. -// -// See the documentation and examples for the Builder and String types to get -// started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" - -// String represents a string of bytes. It provides methods for parsing -// fixed-length and length-prefixed values from it. -type String []byte - -// read advances a String by n bytes and returns them. If less than n bytes -// remain, it returns nil. -func (s *String) read(n int) []byte { - if len(*s) < n || n < 0 { - return nil - } - v := (*s)[:n] - *s = (*s)[n:] - return v -} - -// Skip advances the String by n byte and reports whether it was successful. -func (s *String) Skip(n int) bool { - return s.read(n) != nil -} - -// ReadUint8 decodes an 8-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint8(out *uint8) bool { - v := s.read(1) - if v == nil { - return false - } - *out = uint8(v[0]) - return true -} - -// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint16(out *uint16) bool { - v := s.read(2) - if v == nil { - return false - } - *out = uint16(v[0])<<8 | uint16(v[1]) - return true -} - -// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint24(out *uint32) bool { - v := s.read(3) - if v == nil { - return false - } - *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) - return true -} - -// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint32(out *uint32) bool { - v := s.read(4) - if v == nil { - return false - } - *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) - return true -} - -// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint48(out *uint64) bool { - v := s.read(6) - if v == nil { - return false - } - *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) - return true -} - -// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. -// It reports whether the read was successful. 
-func (s *String) ReadUint64(out *uint64) bool { - v := s.read(8) - if v == nil { - return false - } - *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) - return true -} - -func (s *String) readUnsigned(out *uint32, length int) bool { - v := s.read(length) - if v == nil { - return false - } - var result uint32 - for i := 0; i < length; i++ { - result <<= 8 - result |= uint32(v[i]) - } - *out = result - return true -} - -func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { - lenBytes := s.read(lenLen) - if lenBytes == nil { - return false - } - var length uint32 - for _, b := range lenBytes { - length = length << 8 - length = length | uint32(b) - } - v := s.read(int(length)) - if v == nil { - return false - } - *outChild = v - return true -} - -// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value -// into out and advances over it. It reports whether the read was successful. -func (s *String) ReadUint8LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(1, out) -} - -// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit -// length-prefixed value into out and advances over it. It reports whether the -// read was successful. -func (s *String) ReadUint16LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(2, out) -} - -// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit -// length-prefixed value into out and advances over it. It reports whether -// the read was successful. -func (s *String) ReadUint24LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(3, out) -} - -// ReadBytes reads n bytes into out and advances over them. It reports -// whether the read was successful. -func (s *String) ReadBytes(out *[]byte, n int) bool { - v := s.read(n) - if v == nil { - return false - } - *out = v - return true -} - -// CopyBytes copies len(out) bytes into out and advances over them. It reports -// whether the copy operation was successful -func (s *String) CopyBytes(out []byte) bool { - n := len(out) - v := s.read(n) - if v == nil { - return false - } - return copy(out, v) == n -} - -// Empty reports whether the string does not contain any bytes. -func (s String) Empty() bool { - return len(s) == 0 -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index a17b3cf6..cff0cd49 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -128,6 +128,12 @@ func Contains[E comparable](s []E, v E) bool { return Index(s, v) >= 0 } +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + // Insert inserts the values v... into s at index i, // returning the modified slice. // In the returned slice r, r[i] == v[0]. @@ -162,6 +168,24 @@ func Delete[S ~[]E, E any](s S, i, j int) S { return append(s[:i], s[j:]...) } +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. 
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. func Clone[S ~[]E, E any](s S) S { @@ -175,8 +199,11 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 @@ -193,7 +220,7 @@ func Compact[S ~[]E, E comparable](s S) S { // CompactFunc is like Compact but uses a comparison function. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { + if len(s) < 2 { return s } i := 1 @@ -210,11 +237,19 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. Grow may modify elements of the -// slice between the length and the capacity. If n is negative or too large to +// to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. func Grow[S ~[]E, E any](s S, n int) S { - return append(s, make(S, n)...)[:len(s)] + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd..35a5d8f0 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -30,7 +30,7 @@ func SortFunc[E any](x []E, less func(a, b E) bool) { pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) } -// SortStable sorts the slice x while keeping the original order of equal +// SortStableFunc sorts the slice x while keeping the original order of equal // elements, using less to compare elements. func SortStableFunc[E any](x []E, less func(a, b E) bool) { stableLessFunc(x, len(x), less) diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go deleted file mode 100644 index 3f77f487..00000000 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ /dev/null @@ -1,318 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
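A usage sketch for the helpers added or reworked in this hunk (ContainsFunc, Replace, Compact, Grow), assuming the bumped golang.org/x/exp/slices is on the module path:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 3, 3, 3, 4}

	// ContainsFunc: true if at least one element satisfies the predicate.
	fmt.Println(slices.ContainsFunc(s, func(v int) bool { return v > 3 })) // true

	// Replace: splice the values 9, 9 over s[1:4].
	s = slices.Replace(s, 1, 4, 9, 9)
	fmt.Println(s) // [1 9 9 3 3 4]

	// Compact: collapse consecutive runs of equal elements, like uniq.
	s = slices.Compact(s)
	fmt.Println(s) // [1 9 3 4]

	// Grow: guarantee capacity for 16 more appends without reallocation.
	s = slices.Grow(s, 16)
	fmt.Println(len(s), cap(s) >= len(s)+16) // 4 true
}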
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bufconn provides a net.Conn implemented by a buffer and related -// dialing and listening functionality. -package bufconn - -import ( - "context" - "fmt" - "io" - "net" - "sync" - "time" -) - -// Listener implements a net.Listener that creates local, buffered net.Conns -// via its Accept and Dial method. -type Listener struct { - mu sync.Mutex - sz int - ch chan net.Conn - done chan struct{} -} - -// Implementation of net.Error providing timeout -type netErrorTimeout struct { - error -} - -func (e netErrorTimeout) Timeout() bool { return true } -func (e netErrorTimeout) Temporary() bool { return false } - -var errClosed = fmt.Errorf("closed") -var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} - -// Listen returns a Listener that can only be contacted by its own Dialers and -// creates buffered connections between the two. -func Listen(sz int) *Listener { - return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} -} - -// Accept blocks until Dial is called, then returns a net.Conn for the server -// half of the connection. -func (l *Listener) Accept() (net.Conn, error) { - select { - case <-l.done: - return nil, errClosed - case c := <-l.ch: - return c, nil - } -} - -// Close stops the listener. -func (l *Listener) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.done: - // Already closed. - break - default: - close(l.done) - } - return nil -} - -// Addr reports the address of the listener. -func (l *Listener) Addr() net.Addr { return addr{} } - -// Dial creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. -func (l *Listener) Dial() (net.Conn, error) { - return l.DialContext(context.Background()) -} - -// DialContext creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. If ctx is Done, returns ctx.Err() -func (l *Listener) DialContext(ctx context.Context) (net.Conn, error) { - p1, p2 := newPipe(l.sz), newPipe(l.sz) - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-l.done: - return nil, errClosed - case l.ch <- &conn{p1, p2}: - return &conn{p2, p1}, nil - } -} - -type pipe struct { - mu sync.Mutex - - // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. - // - // Data is read between [r, w) and written to [w, r), wrapping around the end - // of the slice if necessary. - // - // The buffer is empty if r == len(buf), otherwise if r == w, it is full. - // - // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. 
- buf []byte - w, r int - - wwait sync.Cond - rwait sync.Cond - - // Indicate that a write/read timeout has occurred - wtimedout bool - rtimedout bool - - wtimer *time.Timer - rtimer *time.Timer - - closed bool - writeClosed bool -} - -func newPipe(sz int) *pipe { - p := &pipe{buf: make([]byte, 0, sz)} - p.wwait.L = &p.mu - p.rwait.L = &p.mu - - p.wtimer = time.AfterFunc(0, func() {}) - p.rtimer = time.AfterFunc(0, func() {}) - return p -} - -func (p *pipe) empty() bool { - return p.r == len(p.buf) -} - -func (p *pipe) full() bool { - return p.r < len(p.buf) && p.r == p.w -} - -func (p *pipe) Read(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - // Block until p has data. - for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.empty() { - break - } - if p.writeClosed { - return 0, io.EOF - } - if p.rtimedout { - return 0, errTimeout - } - - p.rwait.Wait() - } - wasFull := p.full() - - n = copy(b, p.buf[p.r:len(p.buf)]) - p.r += n - if p.r == cap(p.buf) { - p.r = 0 - p.buf = p.buf[:p.w] - } - - // Signal a blocked writer, if any - if wasFull { - p.wwait.Signal() - } - - return n, nil -} - -func (p *pipe) Write(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.closed { - return 0, io.ErrClosedPipe - } - for len(b) > 0 { - // Block until p is not full. - for { - if p.closed || p.writeClosed { - return 0, io.ErrClosedPipe - } - if !p.full() { - break - } - if p.wtimedout { - return 0, errTimeout - } - - p.wwait.Wait() - } - wasEmpty := p.empty() - - end := cap(p.buf) - if p.w < p.r { - end = p.r - } - x := copy(p.buf[p.w:end], b) - b = b[x:] - n += x - p.w += x - if p.w > len(p.buf) { - p.buf = p.buf[:p.w] - } - if p.w == cap(p.buf) { - p.w = 0 - } - - // Signal a blocked reader, if any. - if wasEmpty { - p.rwait.Signal() - } - } - return n, nil -} - -func (p *pipe) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - p.closed = true - // Signal all blocked readers and writers to return an error. - p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -func (p *pipe) closeWrite() error { - p.mu.Lock() - defer p.mu.Unlock() - p.writeClosed = true - // Signal all blocked readers and writers to return an error. 
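A short usage sketch for the in-memory listener above; the echo logic and buffer sizes are illustrative only:

package main

import (
	"fmt"

	"google.golang.org/grpc/test/bufconn"
)

func main() {
	// One buffered pipe per direction, 1 MiB each: Dial returns the client
	// half of the connection and unblocks Accept with the server half.
	lis := bufconn.Listen(1 << 20)
	defer lis.Close()

	go func() {
		conn, err := lis.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		buf := make([]byte, 16)
		n, _ := conn.Read(buf)
		conn.Write(append([]byte("echo: "), buf[:n]...))
	}()

	client, err := lis.Dial()
	if err != nil {
		panic(err)
	}
	defer client.Close()

	client.Write([]byte("ping"))
	reply := make([]byte, 32)
	n, _ := client.Read(reply)
	fmt.Println(string(reply[:n])) // echo: ping
}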
- p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -type conn struct { - io.Reader - io.Writer -} - -func (c *conn) Close() error { - err1 := c.Reader.(*pipe).Close() - err2 := c.Writer.(*pipe).closeWrite() - if err1 != nil { - return err1 - } - return err2 -} - -func (c *conn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -func (c *conn) SetReadDeadline(t time.Time) error { - p := c.Reader.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.rtimer.Stop() - p.rtimedout = false - if !t.IsZero() { - p.rtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.rtimedout = true - p.rwait.Broadcast() - }) - } - return nil -} - -func (c *conn) SetWriteDeadline(t time.Time) error { - p := c.Writer.(*pipe) - p.mu.Lock() - defer p.mu.Unlock() - p.wtimer.Stop() - p.wtimedout = false - if !t.IsZero() { - p.wtimer = time.AfterFunc(time.Until(t), func() { - p.mu.Lock() - defer p.mu.Unlock() - p.wtimedout = true - p.wwait.Broadcast() - }) - } - return nil -} - -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } - -type addr struct{} - -func (addr) Network() string { return "bufconn" } -func (addr) String() string { return "bufconn" } diff --git a/vendor/modules.txt b/vendor/modules.txt index d4f06454..e9819fc9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,9 +1,3 @@ -# cloud.google.com/go v0.110.4 -## explicit; go 1.19 -# cloud.google.com/go/iam v1.1.1 -## explicit; go 1.19 -# cloud.google.com/go/storage v1.30.1 -## explicit; go 1.19 # github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 ## explicit; go 1.16 github.com/Azure/go-ansiterm @@ -24,32 +18,6 @@ github.com/Masterminds/sprig/v3 ## explicit; go 1.13 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/pkg/guid -github.com/Microsoft/go-winio/pkg/security -github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.9.10 -## explicit; go 1.13 -github.com/Microsoft/hcsshim -github.com/Microsoft/hcsshim/computestorage -github.com/Microsoft/hcsshim/internal/cow -github.com/Microsoft/hcsshim/internal/hcs -github.com/Microsoft/hcsshim/internal/hcs/schema1 -github.com/Microsoft/hcsshim/internal/hcs/schema2 -github.com/Microsoft/hcsshim/internal/hcserror -github.com/Microsoft/hcsshim/internal/hns -github.com/Microsoft/hcsshim/internal/interop -github.com/Microsoft/hcsshim/internal/jobobject -github.com/Microsoft/hcsshim/internal/log -github.com/Microsoft/hcsshim/internal/logfields -github.com/Microsoft/hcsshim/internal/longpath -github.com/Microsoft/hcsshim/internal/mergemaps -github.com/Microsoft/hcsshim/internal/oc -github.com/Microsoft/hcsshim/internal/queue -github.com/Microsoft/hcsshim/internal/safefile -github.com/Microsoft/hcsshim/internal/timeout -github.com/Microsoft/hcsshim/internal/vmcompute -github.com/Microsoft/hcsshim/internal/wclayer -github.com/Microsoft/hcsshim/internal/winapi -github.com/Microsoft/hcsshim/osversion # github.com/agext/levenshtein v1.2.1 ## explicit github.com/agext/levenshtein @@ -91,16 +59,12 @@ github.com/cilium/ebpf/link # github.com/container-storage-interface/spec v1.4.0 ## explicit github.com/container-storage-interface/spec/lib/go/csi -# github.com/containerd/cgroups v1.0.4 -## explicit; go 1.17 -github.com/containerd/cgroups/stats/v1 # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console # github.com/containerd/containerd v1.6.26 ## explicit; go 1.19 github.com/containerd/containerd/pkg/userns 
-github.com/containerd/containerd/sys # github.com/containerd/stargz-snapshotter/estargz v0.4.1 ## explicit; go 1.15 github.com/containerd/stargz-snapshotter/estargz @@ -139,7 +103,7 @@ github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.2+incompatible ## explicit github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v20.10.24+incompatible +# github.com/docker/docker v23.0.3+incompatible ## explicit github.com/docker/docker/api/types/blkiodev github.com/docker/docker/api/types/container @@ -151,6 +115,7 @@ github.com/docker/docker/api/types/strslice github.com/docker/docker/api/types/swarm github.com/docker/docker/api/types/swarm/runtime github.com/docker/docker/api/types/versions +github.com/docker/docker/libnetwork/resolvconf github.com/docker/docker/pkg/archive github.com/docker/docker/pkg/fileutils github.com/docker/docker/pkg/homedir @@ -171,16 +136,11 @@ github.com/docker/go-connections/nat # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14 -## explicit -github.com/docker/libnetwork/resolvconf -github.com/docker/libnetwork/resolvconf/dns -github.com/docker/libnetwork/types # github.com/dustin/go-humanize v1.0.0 ## explicit github.com/dustin/go-humanize -# github.com/fatih/color v1.13.0 -## explicit; go 1.13 +# github.com/fatih/color v1.15.0 +## explicit; go 1.17 github.com/fatih/color # github.com/fsouza/go-dockerclient v1.8.2 ## explicit; go 1.18 @@ -197,18 +157,10 @@ github.com/godbus/dbus github.com/godbus/dbus/v5 # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 -github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto -github.com/gogo/protobuf/protoc-gen-gogo/descriptor -# github.com/gojuno/minimock/v3 v3.0.6 -## explicit; go 1.13 -github.com/gojuno/minimock/v3 # github.com/golang-jwt/jwt/v4 v4.4.2 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 -# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da -## explicit -github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 github.com/golang/protobuf/jsonpb @@ -220,9 +172,6 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/golang/snappy v0.0.4 -## explicit -github.com/golang/snappy # github.com/google/btree v1.0.0 ## explicit github.com/google/btree @@ -258,26 +207,18 @@ github.com/google/go-containerregistry/pkg/v1/types # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/gax-go/v2 v2.11.0 -## explicit; go 1.19 -# github.com/gorilla/websocket v1.5.0 -## explicit; go 1.12 -github.com/gorilla/websocket -# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 +# github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware/retry github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils github.com/grpc-ecosystem/go-grpc-middleware/util/metautils # github.com/hashicorp/consul-template v0.29.6-0.20221026140134-90370e07bf62 ## explicit; go 1.19 -github.com/hashicorp/consul-template/config -github.com/hashicorp/consul-template/dependency github.com/hashicorp/consul-template/signals -github.com/hashicorp/consul-template/version # github.com/hashicorp/consul/api v1.15.3 ## explicit; go 1.12 github.com/hashicorp/consul/api -# github.com/hashicorp/cronexpr v1.1.1 +# github.com/hashicorp/cronexpr 
v1.1.2 ## explicit github.com/hashicorp/cronexpr # github.com/hashicorp/errwrap v1.1.0 @@ -286,7 +227,7 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-hclog v1.3.1 +# github.com/hashicorp/go-hclog v1.4.0 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-immutable-radix v1.3.1 @@ -298,7 +239,7 @@ github.com/hashicorp/go-msgpack/codec # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.4.9 +# github.com/hashicorp/go-plugin v1.4.10 ## explicit; go 1.17 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin @@ -311,9 +252,6 @@ github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.4 ## explicit; go 1.16 github.com/hashicorp/go-secure-stdlib/listenerutil -# github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 -## explicit; go 1.16 -github.com/hashicorp/go-secure-stdlib/mlock # github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 ## explicit; go 1.16 github.com/hashicorp/go-secure-stdlib/parseutil @@ -333,9 +271,6 @@ github.com/hashicorp/go-set ## explicit github.com/hashicorp/go-sockaddr github.com/hashicorp/go-sockaddr/template -# github.com/hashicorp/go-uuid v1.0.3 -## explicit -github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version @@ -364,15 +299,11 @@ github.com/hashicorp/hcl/v2/json # github.com/hashicorp/memberlist v0.5.0 ## explicit; go 1.12 github.com/hashicorp/memberlist -# github.com/hashicorp/nomad v1.4.6 +# github.com/hashicorp/nomad v1.4.11 ## explicit; go 1.20 github.com/hashicorp/nomad/acl github.com/hashicorp/nomad/ci github.com/hashicorp/nomad/client/allocdir -github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces -github.com/hashicorp/nomad/client/allocrunner/taskrunner/state -github.com/hashicorp/nomad/client/config -github.com/hashicorp/nomad/client/devicemanager/state github.com/hashicorp/nomad/client/dynamicplugins github.com/hashicorp/nomad/client/lib/cgutil github.com/hashicorp/nomad/client/lib/fifo @@ -382,10 +313,6 @@ github.com/hashicorp/nomad/client/logmon/logging github.com/hashicorp/nomad/client/logmon/proto github.com/hashicorp/nomad/client/pluginmanager github.com/hashicorp/nomad/client/pluginmanager/csimanager -github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state -github.com/hashicorp/nomad/client/serviceregistration -github.com/hashicorp/nomad/client/serviceregistration/checks -github.com/hashicorp/nomad/client/state github.com/hashicorp/nomad/client/stats github.com/hashicorp/nomad/client/structs github.com/hashicorp/nomad/client/taskenv @@ -398,8 +325,6 @@ github.com/hashicorp/nomad/drivers/shared/executor github.com/hashicorp/nomad/drivers/shared/executor/proto github.com/hashicorp/nomad/helper github.com/hashicorp/nomad/helper/args -github.com/hashicorp/nomad/helper/boltdd -github.com/hashicorp/nomad/helper/bufconndialer github.com/hashicorp/nomad/helper/constraints/semver github.com/hashicorp/nomad/helper/crypto github.com/hashicorp/nomad/helper/discover @@ -412,7 +337,6 @@ github.com/hashicorp/nomad/helper/mount github.com/hashicorp/nomad/helper/pluginutils/grpcutils github.com/hashicorp/nomad/helper/pluginutils/hclspecutils github.com/hashicorp/nomad/helper/pluginutils/hclutils -github.com/hashicorp/nomad/helper/pluginutils/loader github.com/hashicorp/nomad/helper/pointer 
github.com/hashicorp/nomad/helper/stats github.com/hashicorp/nomad/helper/testlog @@ -439,11 +363,6 @@ github.com/hashicorp/nomad/plugins/shared/hclspec github.com/hashicorp/nomad/plugins/shared/structs github.com/hashicorp/nomad/plugins/shared/structs/proto github.com/hashicorp/nomad/testutil -github.com/hashicorp/nomad/version -# github.com/hashicorp/nomad/api v0.0.0-20221006174558-2aa7e66bdb52 -## explicit; go 1.19 -github.com/hashicorp/nomad/api -github.com/hashicorp/nomad/api/contexts # github.com/hashicorp/raft v1.3.11 ## explicit; go 1.16 github.com/hashicorp/raft @@ -454,33 +373,10 @@ github.com/hashicorp/raft-autopilot ## explicit; go 1.12 github.com/hashicorp/serf/coordinate github.com/hashicorp/serf/serf -# github.com/hashicorp/vault/api v1.8.2 +# github.com/hashicorp/vault/api v1.9.1 ## explicit; go 1.19 github.com/hashicorp/vault/api -# github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 -## explicit; go 1.16 -github.com/hashicorp/vault/api/auth/kubernetes -# github.com/hashicorp/vault/sdk v0.6.1 -## explicit; go 1.19 -github.com/hashicorp/vault/sdk/helper/certutil -github.com/hashicorp/vault/sdk/helper/compressutil -github.com/hashicorp/vault/sdk/helper/consts -github.com/hashicorp/vault/sdk/helper/cryptoutil -github.com/hashicorp/vault/sdk/helper/errutil -github.com/hashicorp/vault/sdk/helper/hclutil -github.com/hashicorp/vault/sdk/helper/jsonutil -github.com/hashicorp/vault/sdk/helper/license -github.com/hashicorp/vault/sdk/helper/locksutil -github.com/hashicorp/vault/sdk/helper/logging -github.com/hashicorp/vault/sdk/helper/pathmanager -github.com/hashicorp/vault/sdk/helper/pluginutil -github.com/hashicorp/vault/sdk/helper/strutil -github.com/hashicorp/vault/sdk/helper/wrapping -github.com/hashicorp/vault/sdk/logical -github.com/hashicorp/vault/sdk/physical -github.com/hashicorp/vault/sdk/physical/inmem -github.com/hashicorp/vault/sdk/version -# github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 +# github.com/hashicorp/yamux v0.1.1 ## explicit; go 1.15 github.com/hashicorp/yamux # github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 @@ -493,12 +389,18 @@ github.com/huandu/xstrings # github.com/imdario/mergo v0.3.13 ## explicit; go 1.13 github.com/imdario/mergo -# github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 -## explicit; go 1.12 -github.com/ishidawataru/sctp # github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f ## explicit github.com/jefferai/isbadcipher +# github.com/klauspost/compress v1.15.11 +## explicit; go 1.17 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/kr/pretty v0.3.0 ## explicit; go 1.12 github.com/kr/pretty @@ -511,7 +413,7 @@ github.com/lufia/plan9stats # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.16 +# github.com/mattn/go-isatty v0.0.17 ## explicit; go 1.15 github.com/mattn/go-isatty # github.com/miekg/dns v1.1.50 @@ -544,12 +446,18 @@ github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk +# github.com/moby/patternmatcher v0.5.0 +## explicit; go 1.19 +github.com/moby/patternmatcher # github.com/moby/sys/mount v0.3.3 ## explicit; go 1.16 github.com/moby/sys/mount # github.com/moby/sys/mountinfo v0.6.2 ## 
explicit; go 1.16 github.com/moby/sys/mountinfo +# github.com/moby/sys/sequential v0.5.0 +## explicit; go 1.17 +github.com/moby/sys/sequential # github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 ## explicit; go 1.13 github.com/moby/term @@ -608,10 +516,6 @@ github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/pierrec/lz4 v2.6.1+incompatible -## explicit -github.com/pierrec/lz4 -github.com/pierrec/lz4/internal/xxh32 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -638,7 +542,7 @@ github.com/sean-/seed # github.com/seccomp/libseccomp-golang v0.10.0 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/shirou/gopsutil/v3 v3.23.1 +# github.com/shirou/gopsutil/v3 v3.23.4 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/cpu github.com/shirou/gopsutil/v3/disk @@ -647,12 +551,16 @@ github.com/shirou/gopsutil/v3/internal/common github.com/shirou/gopsutil/v3/mem github.com/shirou/gopsutil/v3/net github.com/shirou/gopsutil/v3/process -# github.com/shoenig/test v0.6.1 +# github.com/shoenig/go-m1cpu v0.1.5 +## explicit; go 1.20 +github.com/shoenig/go-m1cpu +# github.com/shoenig/test v0.6.3 ## explicit; go 1.18 github.com/shoenig/test/interfaces github.com/shoenig/test/internal/assertions github.com/shoenig/test/internal/brokenfs github.com/shoenig/test/internal/constraints +github.com/shoenig/test/internal/util github.com/shoenig/test/must github.com/shoenig/test/portal github.com/shoenig/test/wait @@ -707,30 +615,15 @@ github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json github.com/zclconf/go-cty/cty/msgpack github.com/zclconf/go-cty/cty/set -# go.etcd.io/bbolt v1.3.7 -## explicit; go 1.17 -go.etcd.io/bbolt -# go.opencensus.io v0.24.0 -## explicit; go 1.13 -go.opencensus.io -go.opencensus.io/internal -go.opencensus.io/trace -go.opencensus.io/trace/internal -go.opencensus.io/trace/tracestate -# go.uber.org/atomic v1.9.0 -## explicit; go 1.13 -go.uber.org/atomic # golang.org/x/crypto v0.21.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish -golang.org/x/crypto/cryptobyte -golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/ed25519 golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt -# golang.org/x/exp v0.0.0-20220921164117-439092de6870 +# golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -788,8 +681,6 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# google.golang.org/api v0.126.0 -## explicit; go 1.19 # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine @@ -862,7 +753,6 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/test/bufconn # google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson @@ -914,6 +804,3 @@ gopkg.in/tomb.v1 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# oss.indeed.com/go/libtime v1.6.0 -## explicit; go 1.12 -oss.indeed.com/go/libtime diff --git a/vendor/oss.indeed.com/go/libtime/.gitignore b/vendor/oss.indeed.com/go/libtime/.gitignore deleted file mode 100644 index c4851fbc..00000000 --- a/vendor/oss.indeed.com/go/libtime/.gitignore +++ 
/dev/null @@ -1,4 +0,0 @@ -*~ -cover.out -.idea - diff --git a/vendor/oss.indeed.com/go/libtime/.travis.yml b/vendor/oss.indeed.com/go/libtime/.travis.yml deleted file mode 100644 index 636b98c1..00000000 --- a/vendor/oss.indeed.com/go/libtime/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.12.x - -install: - - echo "no install step" - -script: - - go generate ./... - - go build ./... - - go vet ./... - - go test -race ./... diff --git a/vendor/oss.indeed.com/go/libtime/CODE_OF_CONDUCT.md b/vendor/oss.indeed.com/go/libtime/CODE_OF_CONDUCT.md deleted file mode 100644 index f635ebef..00000000 --- a/vendor/oss.indeed.com/go/libtime/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,76 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. -Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at [opensource@indeed.com](mailto:opensource@indeed.com). All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. 
The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/vendor/oss.indeed.com/go/libtime/LICENSE b/vendor/oss.indeed.com/go/libtime/LICENSE deleted file mode 100644 index e0326865..00000000 --- a/vendor/oss.indeed.com/go/libtime/LICENSE +++ /dev/null @@ -1,11 +0,0 @@ -Copyright (c) 2019 The Indeed Engineering Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/oss.indeed.com/go/libtime/OSSMETADATA b/vendor/oss.indeed.com/go/libtime/OSSMETADATA deleted file mode 100644 index b96d4a4d..00000000 --- a/vendor/oss.indeed.com/go/libtime/OSSMETADATA +++ /dev/null @@ -1 +0,0 @@ -osslifecycle=active diff --git a/vendor/oss.indeed.com/go/libtime/README.md b/vendor/oss.indeed.com/go/libtime/README.md deleted file mode 100644 index 25f918f8..00000000 --- a/vendor/oss.indeed.com/go/libtime/README.md +++ /dev/null @@ -1,69 +0,0 @@ -libtime -======= - -[![Go Report Card](https://goreportcard.com/badge/oss.indeed.com/go/libtime)](https://goreportcard.com/report/oss.indeed.com/go/libtime) -[![Build Status](https://travis-ci.com/indeedeng/libtime.svg?branch=master)](https://travis-ci.com/indeedeng/libtime) -[![GoDoc](https://godoc.org/oss.indeed.com/go/libtime?status.svg)](https://godoc.org/oss.indeed.com/go/libtime) -[![NetflixOSS Lifecycle](https://img.shields.io/osslifecycle/indeedeng/libtime.svg)](OSSMETADATA) -[![GitHub](https://img.shields.io/github/license/indeedeng/libtime.svg)](LICENSE) - -# Project Overview - -Package `libtime` is a library which provides common time operations. - -# Getting Started - -The `oss.indeed.com/go/libtime` module can be installed by running: -``` -$ go get oss.indeed.com/go/libtime -``` - -Example usage: -``` -c := libtime.SystemClock() - -func foo(c libtime.Clock) { - now := c.Now() -} -``` - -# Asking Questions - -For technical questions about `libtime`, just file an issue in the GitHub tracker. - -For questions about Open Source in Indeed Engineering, send us an email at -opensource@indeed.com - -# Contributing - -We welcome contributions! Feel free to help make `libtime` better. - -### Process - -- Open an issue and describe the desired feature / bug fix before making -changes. It's useful to get a second pair of eyes before investing development -effort. -- Make the change. If adding a new feature, remember to provide tests that -demonstrate the new feature works, including any error paths. If contributing -a bug fix, add tests that demonstrate the erroneous behavior is fixed. -- Open a pull request. Automated CI tests will run. If the tests fail, please -make changes to fix the behavior, and repeat until the tests pass. -- Once everything looks good, one of the indeedeng members will review the -PR and provide feedback. - -# Maintainers - -The `oss.indeed.com/go/libtime` module is maintained by Indeed Engineering. - -While we are always busy helping people get jobs, we will try to respond to -GitHub issues, pull requests, and questions within a couple of business days. - -# Code of Conduct - -`oss.indeed.com/go/libtime` is governed by the [Contributer Covenant v1.4.1](CODE_OF_CONDUCT.md) - -For more information please contact opensource@indeed.com. - -# License - -The `oss.indeed.com/go/libtime` module is open source under the [BSD-3-Clause](LICENSE) license. diff --git a/vendor/oss.indeed.com/go/libtime/after.go b/vendor/oss.indeed.com/go/libtime/after.go deleted file mode 100644 index a8391fa9..00000000 --- a/vendor/oss.indeed.com/go/libtime/after.go +++ /dev/null @@ -1,28 +0,0 @@ -package libtime - -import ( - "context" - "time" -) - -// After is a safer, but more expensive alternative to time.After. -// -// After waits for the duration to elapse and then sends the current time -// on the returned channel. If the context gets canceled before the duration -// elapses, no message is sent on the returned channel. -// -// The returned channel is never closed. 
-func After(ctx context.Context, duration time.Duration) <-chan time.Time { - durationElapsedCh := make(chan time.Time, 1) - go func() { - t := time.NewTimer(duration) - defer t.Stop() - - select { - case tick := <-t.C: - durationElapsedCh <- tick - case <-ctx.Done(): - } - }() - return durationElapsedCh -} diff --git a/vendor/oss.indeed.com/go/libtime/clock.go b/vendor/oss.indeed.com/go/libtime/clock.go deleted file mode 100644 index cbfd5e7e..00000000 --- a/vendor/oss.indeed.com/go/libtime/clock.go +++ /dev/null @@ -1,48 +0,0 @@ -package libtime // import "oss.indeed.com/go/libtime" - -import "time" - -//go:generate go run github.com/gojuno/minimock/v3/cmd/minimock -g -i Clock -o libtimetest/ -s _mock.go - -// Clock provides a mock-able interface of some of the standard library time -// package functions, like time.Now and time.Since. A default implementation is -// provided by SystemClock which defers to the time package functions. -type Clock interface { - // Now is the time on this Clock. - Now() time.Time - - // Since returns the size of the interval between the time.Time of Clock.Now - // and the provided time.Time. - Since(time.Time) time.Duration - - // SinceMS returns the same thing as Since, but converted to milliseconds. - SinceMS(time.Time) int -} - -// SystemClock creates a Clock that is implemented by deferring to the -// standard library time package. -func SystemClock() Clock { - return (*clock)(nil) -} - -type clock struct { - // nothing to see here -} - -var _ Clock = (*clock)(nil) - -func (*clock) Now() time.Time { - return time.Now() -} - -func (c *clock) Since(t time.Time) time.Duration { - now := c.Now() - return now.Sub(t) -} - -func (c *clock) SinceMS(t time.Time) int { - dur := c.Since(t) - millis := DurationToMillis(dur) - i := int(millis) - return i -} diff --git a/vendor/oss.indeed.com/go/libtime/repo.cfg b/vendor/oss.indeed.com/go/libtime/repo.cfg deleted file mode 100644 index 95481ae9..00000000 --- a/vendor/oss.indeed.com/go/libtime/repo.cfg +++ /dev/null @@ -1 +0,0 @@ -project.visibility=internal diff --git a/vendor/oss.indeed.com/go/libtime/sleep.go b/vendor/oss.indeed.com/go/libtime/sleep.go deleted file mode 100644 index 286f044e..00000000 --- a/vendor/oss.indeed.com/go/libtime/sleep.go +++ /dev/null @@ -1,28 +0,0 @@ -package libtime - -import ( - "time" -) - -//go:generate go run github.com/gojuno/minimock/v3/cmd/minimock -g -i Sleeper -o libtimetest/ -s _mock.go - -// A Sleeper is a useful way for calling time.Sleep -// in a mockable way for tests. -type Sleeper interface { - // Sleep for the specified amount of time. - Sleep(time.Duration) -} - -type sleeper struct{} - -var _ Sleeper = (*sleeper)(nil) - -// NewSleeper creates a Sleeper that will actually call -// time.Sleep under the hood, causing the program to sleep. -func NewSleeper() Sleeper { - return (*sleeper)(nil) // lolz -} - -func (s *sleeper) Sleep(duration time.Duration) { - time.Sleep(duration) -} diff --git a/vendor/oss.indeed.com/go/libtime/timeformat.go b/vendor/oss.indeed.com/go/libtime/timeformat.go deleted file mode 100644 index 7fefbb47..00000000 --- a/vendor/oss.indeed.com/go/libtime/timeformat.go +++ /dev/null @@ -1,11 +0,0 @@ -package libtime - -// Many common formats can be found in go's time package, such as ANSIC and RFC, -// so check before adding any format to this file. -// -// Additional formats are described here for convenience. -const ( - // ISO8601Micro is the default joda datetime print format, which is useful - // when interacting with services written in Java. 
- ISO8601Micro = `2006-01-02T15:04:05.000-07:00` -) diff --git a/vendor/oss.indeed.com/go/libtime/timer.go b/vendor/oss.indeed.com/go/libtime/timer.go deleted file mode 100644 index 1e3f097e..00000000 --- a/vendor/oss.indeed.com/go/libtime/timer.go +++ /dev/null @@ -1,36 +0,0 @@ -package libtime - -import ( - "time" -) - -// StopTimerFunc is used to stop a time.Timer. -// -// Calling StopTimerFunc prevents its time.Timer from firing. Returns true if the call -// stops the timer, false if the timer has already expired. or has been stopped. -// -// https://pkg.go.dev/time#Timer.Stop -type StopTimerFunc func() bool - -// SafeTimer creates a time.Timer and a StopTimerFunc, forcing the caller to deal -// with the otherwise potential resource leak. Encourages safe use of a time.Timer -// in a select statement, but without the overhead of a context.Context. -// -// Typical usage: -// -// t, stop := libtime.SafeTimer(interval) -// defer stop() -// for { -// select { -// case <- t.C: -// foo() -// case <- otherC : -// return -// } -// } -// -// Does not panic if duration is <= 0, instead assuming the smallest positive value. -func SafeTimer(duration time.Duration) (*time.Timer, StopTimerFunc) { - t := time.NewTimer(duration) - return t, t.Stop -} diff --git a/vendor/oss.indeed.com/go/libtime/tools.go b/vendor/oss.indeed.com/go/libtime/tools.go deleted file mode 100644 index c2ceb136..00000000 --- a/vendor/oss.indeed.com/go/libtime/tools.go +++ /dev/null @@ -1,7 +0,0 @@ -//+build tools - -package libtime - -import ( - _ "github.com/gojuno/minimock/v3" -) diff --git a/vendor/oss.indeed.com/go/libtime/unixtime.go b/vendor/oss.indeed.com/go/libtime/unixtime.go deleted file mode 100644 index 43cf8f2c..00000000 --- a/vendor/oss.indeed.com/go/libtime/unixtime.go +++ /dev/null @@ -1,28 +0,0 @@ -package libtime - -import ( - "time" -) - -const nanosToMillisDenominator = int64(time.Millisecond / time.Nanosecond) -const millisToSecsDenominator = int64(time.Second / time.Millisecond) - -// ToMilliseconds returns time in milliseconds since epoch -func ToMilliseconds(t time.Time) int64 { - return t.UnixNano() / nanosToMillisDenominator -} - -// FromMilliseconds converts time in milliseconds since epoch to time.Time -func FromMilliseconds(ms int64) time.Time { - seconds := ms / millisToSecsDenominator - - milliseconds := ms % millisToSecsDenominator - nanoseconds := milliseconds * nanosToMillisDenominator - - return time.Unix(seconds, nanoseconds) -} - -// DurationToMillis returns d in terms of milliseconds. -func DurationToMillis(d time.Duration) int64 { - return d.Nanoseconds() / nanosToMillisDenominator -}
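
For reference, a minimal, illustrative sketch (not part of the patch itself) of how the `oss.indeed.com/go/libtime` helpers removed above fit together; the import path and the `ToMilliseconds`, `FromMilliseconds`, `DurationToMillis`, `SafeTimer`, and `ISO8601Micro` names are taken directly from the deleted sources shown in this diff, while the surrounding program is only an assumed usage example:

```go
package main

import (
	"fmt"
	"time"

	"oss.indeed.com/go/libtime" // no longer vendored after this change
)

func main() {
	// Round-trip a timestamp through epoch milliseconds.
	now := time.Now()
	ms := libtime.ToMilliseconds(now)    // milliseconds since the Unix epoch
	back := libtime.FromMilliseconds(ms) // truncated to millisecond precision
	fmt.Println(ms, back.Format(libtime.ISO8601Micro))

	// SafeTimer returns the timer together with its stop function, so the
	// caller cannot forget to release it when the select returns early.
	t, stop := libtime.SafeTimer(10 * time.Millisecond)
	defer stop()
	<-t.C

	fmt.Println("elapsed ms:", libtime.DurationToMillis(time.Since(now)))
}
```

As the deleted `timer.go` comment notes, pairing the timer with a `StopTimerFunc` is what makes it safe to use in a `select` loop without leaking timers.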