diff --git a/.docker/Dockerfile-build b/.docker/Dockerfile-build
index d46370c440f7..662f32693340 100644
--- a/.docker/Dockerfile-build
+++ b/.docker/Dockerfile-build
@@ -15,6 +15,7 @@ COPY pkg/client-go/go.* pkg/client-go/
ENV CGO_ENABLED 1
ENV CGO_CPPFLAGS -DSQLITE_DEFAULT_FILE_PERMISSIONS=0600
+ENV GOPROXY=https://proxy.golang.org,direct
RUN go mod download
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index ef90d000d7f4..9a61a55251d1 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1 +1 @@
-* @aeneasr @ory/product-development
+* @SPHTech/identity-platform-team
diff --git a/.github/workflows/ci.yaml b/.github/_workflows/ci.yaml
similarity index 100%
rename from .github/workflows/ci.yaml
rename to .github/_workflows/ci.yaml
diff --git a/.github/workflows/closed_references.yml b/.github/_workflows/closed_references.yml
similarity index 100%
rename from .github/workflows/closed_references.yml
rename to .github/_workflows/closed_references.yml
diff --git a/.github/workflows/codeql-analysis.yml b/.github/_workflows/codeql-analysis.yml
similarity index 100%
rename from .github/workflows/codeql-analysis.yml
rename to .github/_workflows/codeql-analysis.yml
diff --git a/.github/workflows/conventional_commits.yml b/.github/_workflows/conventional_commits.yml
similarity index 100%
rename from .github/workflows/conventional_commits.yml
rename to .github/_workflows/conventional_commits.yml
diff --git a/.github/workflows/cve-scan.yaml b/.github/_workflows/cve-scan.yaml
similarity index 100%
rename from .github/workflows/cve-scan.yaml
rename to .github/_workflows/cve-scan.yaml
diff --git a/.github/workflows/format.yml b/.github/_workflows/format.yml
similarity index 100%
rename from .github/workflows/format.yml
rename to .github/_workflows/format.yml
diff --git a/.github/workflows/labels.yml b/.github/_workflows/labels.yml
similarity index 100%
rename from .github/workflows/labels.yml
rename to .github/_workflows/labels.yml
diff --git a/.github/_workflows/licenses.yml b/.github/_workflows/licenses.yml
new file mode 100644
index 000000000000..4d9965010970
--- /dev/null
+++ b/.github/_workflows/licenses.yml
@@ -0,0 +1,35 @@
+# AUTO-GENERATED, DO NOT EDIT!
+# Please edit the original at https://github.com/ory/meta/blob/master/templates/repository/common/.github/workflows/licenses.yml
+
+name: Licenses
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+ - v3
+ - master
+
+jobs:
+ licenses:
+ name: License compliance
+ runs-on: ubuntu-latest
+ steps:
+ - name: Install script
+ uses: ory/ci/licenses/setup@master
+ with:
+ token: ${{ secrets.ORY_BOT_PAT || secrets.GITHUB_TOKEN }}
+ - name: Check licenses
+ uses: ory/ci/licenses/check@master
+ - name: Write, commit, push licenses
+ uses: ory/ci/licenses/write@master
+ if:
+ ${{ github.ref == 'refs/heads/main' || github.ref ==
+ 'refs/heads/master' || github.ref == 'refs/heads/v3' }}
+ with:
+ author-email:
+ ${{ secrets.ORY_BOT_PAT &&
+ '60093411+ory-bot@users.noreply.github.com' ||
+ format('{0}@users.noreply.github.com', github.actor) }}
+ author-name: ${{ secrets.ORY_BOT_PAT && 'ory-bot' || github.actor }}
diff --git a/.github/workflows/milestone.yml b/.github/_workflows/milestone.yml
similarity index 100%
rename from .github/workflows/milestone.yml
rename to .github/_workflows/milestone.yml
diff --git a/.github/workflows/pm.yml b/.github/_workflows/pm.yml
similarity index 100%
rename from .github/workflows/pm.yml
rename to .github/_workflows/pm.yml
diff --git a/.github/workflows/stale.yml b/.github/_workflows/stale.yml
similarity index 100%
rename from .github/workflows/stale.yml
rename to .github/_workflows/stale.yml
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 32c141fd6f3f..160d5b248445 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,60 +1,31 @@
-
-
-## Related issue(s)
-
-
-
-## Checklist
-
-
-
-- [ ] I have read the [contributing guidelines](../blob/master/CONTRIBUTING.md).
-- [ ] I have referenced an issue containing the design document if my change
- introduces a new feature.
-- [ ] I am following the
- [contributing code guidelines](../blob/master/CONTRIBUTING.md#contributing-code).
-- [ ] I have read the [security policy](../security/policy).
-- [ ] I confirm that this pull request does not address a security
- vulnerability. If this pull request addresses a security vulnerability, I
- confirm that I got the approval (please contact
- [security@ory.com](mailto:security@ory.com)) from the maintainers to push
- the changes.
-- [ ] I have added tests that prove my fix is effective or that my feature
- works.
-- [ ] I have added or changed [the documentation](https://github.com/ory/docs).
-
-## Further Comments
-
-
+## Contents
+
+1. [Summary](#summary)
+2. [Links](#links)
+3. [Test Plan](#test-plan)
+
+## Summary
+
+// What changes are made / introduced in this PR?
+
+### Problem
+
+// Give a TL;DR, one sentence summary on the feature, issue, bug to resolve.
+
+### Solution
+
+// Give detailed explanation on the solution. What is the fix? Why do we need to do it this way? How does it resolve the error?
+
+## Links
+
+// JIRA card link, any published API docs, confluence etc.
+
+## Test Plan
+
+// How did you test your code?
+// If you can't test this locally, describe how you will test it after the PR is merged
+// Add details about how you setup your test (if any), and screenshots of the test results whenever possible
+
+### Test Method
+
+### Test Results
diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
new file mode 100644
index 000000000000..1ed955002d04
--- /dev/null
+++ b/.github/workflows/build-push.yml
@@ -0,0 +1,81 @@
+name: Build and Push Image to ECR
+on:
+ workflow_dispatch:
+ inputs:
+ commit_sha:
+ description: "Commit SHA to build"
+ required: true
+ environment:
+ description: "Environment to build"
+ required: true
+
+permissions:
+ id-token: write
+
+jobs:
+ prepare:
+ runs-on: ubuntu-latest
+ outputs:
+ short_sha: ${{ steps.get-short-sha.outputs.short_sha }}
+ steps:
+ - name: Compute short SHA
+ id: get-short-sha
+ env:
+ FULL_SHA: ${{ github.event.inputs.commit_sha || 'master' }}
+ run: |
+ echo "short_sha=${FULL_SHA::7}" >> "$GITHUB_OUTPUT"
+
+ build:
+ runs-on: ubuntu-latest
+ needs: prepare
+ environment: ${{ github.event.inputs.environment || 'dev'}}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.inputs.commit_sha || 'master' }}
+ path: .
+
+ - name: Setup Go environment
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: "./go.mod"
+
+ - run: go version
+
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@v4
+ with:
+ role-to-assume: ${{ vars.AWS_IAM_ROLE }}
+ role-skip-session-tagging: true
+ role-session-name: gh-actions
+ aws-region: ${{ vars.AWS_REGION }}
+
+ - name: Login to Amazon ECR
+ id: login-ecr
+ uses: aws-actions/amazon-ecr-login@v2
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ - name: Setup docker buildx
+ uses: docker/setup-buildx-action@v3
+ with:
+ version: latest
+
+ - name: Build kratos binary
+ run: |
+ GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o kratos .
+
+ - name: Get Image name:tag
+ id: get-image-name
+ run: |
+ echo "image_name=${{ steps.login-ecr.outputs.registry }}/idpf/kratos:${{ needs.prepare.outputs.short_sha }}" >> "$GITHUB_OUTPUT"
+
+ - name: Build Docker Image
+ run: |
+ docker build -f .docker/Dockerfile-alpine -t ${{ steps.get-image-name.outputs.image_name }} .
+
+ - name: Push Docker Image
+ run: |
+ docker push ${{ steps.get-image-name.outputs.image_name }}
diff --git a/README.md b/README.md
index 837e3c6f4c07..799f60298e67 100644
--- a/README.md
+++ b/README.md
@@ -1,517 +1,35 @@
-
-
-
+## Kratos (Fork) – Internal README
-
+This is our maintained fork of Ory Kratos. We keep tight control over what we ship and when we ship it.
-Ory Kratos is an API first identity and user management system for cloud native
-applications. It centralizes login, registration, recovery, verification, and
-profile management flows so your services consume them instead of reimplementing
-them.
+### Goals
-
-
+- Manage our own Docker image builds
+- Selectively include upstream changes
+- Add fork-specific changes when needed
-**Table of contents**
+### Upstream
-- [What is Ory Kratos?](#what-is-ory-kratos)
- - [Why Ory Kratos](#why-ory-kratos)
-- [Migrating from Auth0, Okta, and similar providers](#migrating-from-auth0-okta-and-similar-providers)
-- [Deployment options](#deployment-options)
- - [Use Ory Kratos on the Ory Network](#use-ory-kratos-on-the-ory-network)
- - [Self-host Ory Kratos](#self-host-ory-kratos)
-- [Quickstart](#quickstart)
- - [Who is using it?](#who-is-using-it)
+- Source: `https://github.com/ory/kratos`
+- Sync policy: Periodic manual sync; we may cherry-pick or defer changes
-
+### Docker images
-## What is Ory Kratos?
-
-Ory Kratos is an API first identity and user management system that follows
-[cloud architecture best practices](https://www.ory.com/docs/ecosystem/software-architecture-philosophy).
-It focuses on core identity workflows that almost every application needs:
-
-- Self service login and registration
-- Account verification and recovery
-- Multi factor authentication
-- Profile and account management
-- Identity schemas and traits
-- Admin APIs for lifecycle management
-
-We recommend starting with the
-[Ory Kratos introduction docs](https://www.ory.com/kratos/docs/) to learn more
-about its architecture, feature set, and how it compares to other systems.
-
-### Why Ory Kratos
-
-Ory Kratos is designed to:
-
-- Remove identity logic from your application code and expose it over HTTP APIs
-- Work well with any UI framework through browser based and native app flows
-- Scale to large numbers of identities and devices
-- Integrate with the rest of the Ory stack for OAuth2, OpenID Connect, and
- access control
-- Fit into modern cloud native environments such as Kubernetes and managed
- platforms
-
-## Migrating from Auth0, Okta, and similar providers
-
-If you are migrating from Auth0, Okta, or another identity provider that uses
-OAuth2 / OpenID Connect based login, consider using **Ory Hydra + Ory Kratos**
-together:
-
-- **Ory Hydra** acts as the OAuth2 and OpenID Connect provider and can replace
- most authorization server and token issuing capabilities of your existing IdP.
-- **Ory Kratos** provides identity, credentials, and user-facing flows (login,
- registration, recovery, verification, profile management).
-
-This combination is often a drop-in replacement for OAuth2 and OpenID Connect
-capabilities at the protocol level. In practice, you update client configuration
-and endpoints to point to Hydra, migrate identities into Kratos, and keep your
-applications speaking the same OAuth2 / OIDC protocols they already use.
-
-## Deployment options
-
-You can run Ory Kratos in two main ways:
-
-- As a managed service on the Ory Network
-- As a self hosted service under your own control, with or without the Ory
- Enterprise License
-
-### Use Ory Kratos on the Ory Network
-
-The [Ory Network](https://www.ory.com/cloud) is the fastest way to use Ory
-services in production. **Ory Identities** is powered by the open source Ory
-Kratos server and is API compatible.
-
-The Ory Network provides:
-
-- Identity and credential management that scales to billions of users and
- devices
-- Registration, login, and account management flows for passkeys, biometrics,
- social login, SSO, and multi factor authentication
-- Prebuilt login, registration, and account management pages and components
-- OAuth2 and OpenID Connect for single sign on, API access, and machine to
- machine authorization
-- Low latency permission checks based on the Zanzibar model with the Ory
- Permission Language
-- GDPR friendly storage with data locality and compliance in mind
-- Web based Ory Console and Ory CLI for administration and operations
-- Cloud native APIs compatible with the open source servers
-- Fair, usage based [pricing](https://www.ory.com/pricing)
-
-Sign up for a
-[free developer account](https://console.ory.sh/registration?utm_source=github&utm_medium=banner&utm_campaign=kratos-readme)
-to get started.
-
-### Self-host Ory Kratos
-
-You can run Ory Kratos yourself for full control over infrastructure,
-deployment, and customization.
-
-The [install guide](https://www.ory.com/kratos/docs/install) explains how to:
-
-- Install Kratos on Linux, macOS, Windows, and Docker
-- Configure databases such as PostgreSQL, MySQL, and CockroachDB
-- Deploy to Kubernetes and other orchestration systems
-- Build Kratos from source
-
-This guide uses the open source distribution to get you started without license
-requirements. It is a great fit for individuals, researchers, hackers, and
-companies that want to experiment, prototype, or run unimportant workloads
-without SLAs. You get the full core engine, and you are free to inspect, extend,
-and build it from source.
-
-If you run Kratos as part of a business-critical system, for example login and
-account recovery for all your users, you should use a commercial agreement to
-reduce operational and security risk. The **Ory Enterprise License (OEL)**
-layers on top of self-hosted Kratos and provides:
-
-- Additional enterprise features that are not available in the open source
- version such as SCIM, SAML, organization login ("SSO"), CAPTCHAs and more
-- Regular security releases, including CVE patches, with service level
- agreements
-- Support for advanced scaling, multi-tenancy, and complex deployments
-- Premium support options with SLAs, direct access to engineers, and onboarding
- help
-- Access to a private Docker registry with frequent and vetted, up-to-date
- enterprise builds
-
-For guaranteed CVE fixes, current enterprise builds, advanced features, and
-support in production, you need a valid
-[Ory Enterprise License](https://www.ory.com/ory-enterprise-license) and access
-to the Ory Enterprise Docker registry. To learn more,
-[contact the Ory team](https://www.ory.com/contact/).
-
-## Quickstart
-
-Install the [Ory CLI](https://www.ory.com/docs/guides/cli/installation) and
-create a new project to try Ory Identities.
+- CI builds/pushes are handled by `.github/workflows/build-push.yml`.
+- Local build/push:
```bash
-# Install the Ory CLI if you do not have it yet:
-bash <(curl https://raw.githubusercontent.com/ory/meta/master/install.sh) -b . ory
-sudo mv ./ory /usr/local/bin/
-
-# Sign in or sign up
-ory auth
-
-# Create a new project
-ory create project --create-workspace "Ory Open Source" --name "GitHub Quickstart" --use-project
-ory open ax login
+make docker
+# or
+docker build -f .docker/Dockerfile-alpine -t <registry>/kratos:<tag> .
+docker push <registry>/kratos:<tag>
```
-### Who is using it?
-
-
-
-The Ory community stands on the shoulders of individuals, companies, and
-maintainers. The Ory team thanks everyone involved - from submitting bug reports
-and feature requests, to contributing patches and documentation. The Ory
-community counts more than 50.000 members and is growing. The Ory stack protects
-7.000.000.000+ API requests every day across thousands of companies. None of
-this would have been possible without each and everyone of you!
-
-The following list represents companies that have accompanied us along the way
-and that have made outstanding contributions to our ecosystem. _If you think
-that your company deserves a spot here, reach out to
-office@ory.com now_!
-
-
+### Documentation
-Many thanks to all individual contributors
+- Upstream product docs and API: see `https://www.ory.sh/kratos/docs/`.
+- We archived the original upstream README at `docs/UPSTREAM-README.md`.
-
+### Contributing
-
+- Open fork-specific PRs here. When appropriate, also contribute upstream.
diff --git a/docs/UPSTREAM-README.md b/docs/UPSTREAM-README.md
new file mode 100644
index 000000000000..8820a4495104
--- /dev/null
+++ b/docs/UPSTREAM-README.md
@@ -0,0 +1,773 @@
+Ory Kratos is _the_ developer-friendly, security-hardened and battle-tested
+Identity, User Management and Authentication system for the Cloud. Finally, it
+is no longer necessary to implement User Login for the umpteenth time!
+
+## Ory Kratos on the Ory Network
+
+The [Ory Network](https://www.ory.sh/cloud) is the fastest, most secure and
+worry-free way to use Ory's Services. **Ory Identities** is powered by the Ory
+Kratos open source identity server, and it's fully API-compatible.
+
+The Ory Network provides the infrastructure for modern end-to-end security:
+
+- **Identity & credential management scaling to billions of users and devices**
+- **Registration, Login and Account management flows for passkey, biometric,
+ social, SSO and multi-factor authentication**
+- **Pre-built login, registration and account management pages and components**
+- OAuth2 and OpenID provider for single sign on, API access and
+ machine-to-machine authorization
+- Low-latency permission checks based on Google's Zanzibar model and with
+ built-in support for the Ory Permission Language
+
+It's fully managed, highly available, developer & compliance-friendly!
+
+- GDPR-friendly secure storage with data locality
+- Cloud-native APIs, compatible with Ory's Open Source servers
+- Comprehensive admin tools with the web-based Ory Console and the Ory Command
+ Line Interface (CLI)
+- Extensive documentation, straightforward examples and easy-to-follow guides
+- Fair, usage-based [pricing](https://www.ory.sh/pricing)
+
+Sign up for a
+[**free developer account**](https://console.ory.sh/registration?utm_source=github&utm_medium=banner&utm_campaign=kratos-readme)
+today!
+
+## Ory Kratos On-premise support
+
+Are you running Ory Kratos in a mission-critical, commercial environment? The
+Ory Enterprise License (OEL) provides enhanced features, security, and expert
+support directly from the Ory core maintainers.
+
+Organizations that require advanced features, enhanced security, and
+enterprise-grade support for Ory's identity and access management solutions
+benefit from the Ory Enterprise License (OEL) as a self-hosted, premium offering
+including:
+
+- Additional features not available in the open-source version.
+- Regular releases that address CVEs and security vulnerabilities, with strict
+ SLAs for patching based on severity.
+- Support for advanced scaling and multi-tenancy features.
+- Premium support options, including SLAs, direct engineer access, and concierge
+ onboarding.
+- Access to private Docker registry for a faster, more reliable access to vetted
+ enterprise builds.
+
+A valid Ory Enterprise License and access to the Ory Enterprise Docker Registry
+are required to use these features. OEL is designed for mission-critical,
+production, and global applications where organizations need maximum control and
+flexibility over their identity infrastructure. Ory's offering is the only
+official program for qualified support from the maintainers. For more
+information book a meeting with the Ory team to
+**[discuss your needs](https://www.ory.sh/contact/)**!
+
+### Quickstart
+
+Install the [Ory CLI](https://www.ory.sh/docs/guides/cli/installation) and
+create a new project to get started with Ory Identities right away:
+
+```
+# If you don't have Ory CLI installed yet:
+bash <(curl https://raw.githubusercontent.com/ory/meta/master/install.sh) -b . ory
+sudo mv ./ory /usr/local/bin/
+
+# Sign up
+ory auth
+
+# Create project
+ory create project
+```
+
+
+
+
+**Table of Contents**
+
+- [Ory Kratos on the Ory Network](#ory-kratos-on-the-ory-network)
+ - [Quickstart](#quickstart)
+- [What is Ory Kratos?](#what-is-ory-kratos)
+ - [Who is using it?](#who-is-using-it)
+- [Getting Started](#getting-started)
+ - [Installation](#installation)
+- [Ecosystem](#ecosystem)
+ - [Ory Kratos: Identity and User Infrastructure and Management](#ory-kratos-identity-and-user-infrastructure-and-management)
+ - [Ory Hydra: OAuth2 & OpenID Connect Server](#ory-hydra-oauth2--openid-connect-server)
+ - [Ory Oathkeeper: Identity & Access Proxy](#ory-oathkeeper-identity--access-proxy)
+ - [Ory Keto: Access Control Policies as a Server](#ory-keto-access-control-policies-as-a-server)
+- [Security](#security)
+ - [Disclosing vulnerabilities](#disclosing-vulnerabilities)
+- [Telemetry](#telemetry)
+- [Documentation](#documentation)
+ - [Guide](#guide)
+ - [HTTP API documentation](#http-api-documentation)
+ - [Upgrading and Changelog](#upgrading-and-changelog)
+ - [Command line documentation](#command-line-documentation)
+ - [Develop](#develop)
+ - [Dependencies](#dependencies)
+ - [Install from source](#install-from-source)
+ - [Formatting Code](#formatting-code)
+ - [Running Tests](#running-tests)
+ - [Short Tests](#short-tests)
+ - [Regular Tests](#regular-tests)
+ - [Updating Test Fixtures](#updating-test-fixtures)
+ - [End-to-End Tests](#end-to-end-tests)
+ - [Build Docker](#build-docker)
+ - [Documentation Tests](#documentation-tests)
+ - [Preview API documentation](#preview-api-documentation)
+
+
+
+## What is Ory Kratos?
+
+Ory Kratos is an API-first Identity and User Management system that is built
+according to
+[cloud architecture best practices](https://www.ory.sh/docs/ecosystem/software-architecture-philosophy).
+It implements core use cases that almost every software application needs to
+deal with:
+
+- **Self-service Login and Registration**: Allow end-users to create and sign
+ into accounts (we call them **identities**) using Username / Email and
+ password combinations, Social Sign In ("Sign in with Google, GitHub"),
+ Passwordless flows, and others.
+- **Multi-Factor Authentication (MFA/2FA)**: Support protocols such as TOTP
+ ([RFC 6238](https://tools.ietf.org/html/rfc6238) and
+ [IETF RFC 4226](https://tools.ietf.org/html/rfc4226) - better known as
+ [Google Authenticator](https://en.wikipedia.org/wiki/Google_Authenticator))
+- **Account Verification**: Verify that an E-Mail address, phone number, or
+ physical address actually belong to that identity.
+- **Account Recovery**: Recover access using "Forgot Password" flows, Security
+ Codes (in case of MFA device loss), and others.
+- **Profile and Account Management**: Update passwords, personal details, email
+ addresses, linked social profiles using secure flows.
+- **Admin APIs**: Import, update, delete identities.
+
+We highly recommend reading the
+[Ory Kratos introduction docs](https://www.ory.sh/kratos/docs/) to learn more
+about Ory Krato's background, feature set, and differentiation from other
+products.
+
+### Who is using it?
+
+
+
+The Ory community stands on the shoulders of individuals, companies, and
+maintainers. The Ory team thanks everyone involved - from submitting bug reports
+and feature requests, to contributing patches and documentation. The Ory
+community counts more than 50.000 members and is growing. The Ory stack protects
+7.000.000.000+ API requests every day across thousands of companies. None of
+this would have been possible without each and everyone of you!
+
+The following list represents companies that have accompanied us along the way
+and that have made outstanding contributions to our ecosystem. _If you think
+that your company deserves a spot here, reach out to
+office@ory.sh now_!
+
+
+
+Many thanks to all individual contributors
+
+
+
+
+
+## Getting Started
+
+To get started with some easy examples, head over to the
+[Get Started Documentation](https://www.ory.sh/docs/guides/protect-page-login/).
+
+### Installation
+
+Head over to the
+[Ory Developer Documentation](https://www.ory.sh/kratos/docs/install) to learn
+how to install Ory Kratos on Linux, macOS, Windows, and Docker and how to build
+Ory Kratos from source.
+
+## Ecosystem
+
+
+
+We build Ory on several guiding principles when it comes to our architecture
+design:
+
+- Minimal dependencies
+- Runs everywhere
+- Scales without effort
+- Minimize room for human and network errors
+
+Ory's architecture is designed to run best on a Container Orchestration system
+such as Kubernetes, CloudFoundry, OpenShift, and similar projects. Binaries are
+small (5-15MB) and available for all popular processor types (ARM, AMD64, i386)
+and operating systems (FreeBSD, Linux, macOS, Windows) without system
+dependencies (Java, Node, Ruby, libxml, ...).
+
+### Ory Kratos: Identity and User Infrastructure and Management
+
+[Ory Kratos](https://github.com/ory/kratos) is an API-first Identity and User
+Management system that is built according to
+[cloud architecture best practices](https://www.ory.sh/docs/next/ecosystem/software-architecture-philosophy).
+It implements core use cases that almost every software application needs to
+deal with: Self-service Login and Registration, Multi-Factor Authentication
+(MFA/2FA), Account Recovery and Verification, Profile, and Account Management.
+
+### Ory Hydra: OAuth2 & OpenID Connect Server
+
+[Ory Hydra](https://github.com/ory/hydra) is an OpenID Certified™ OAuth2 and
+OpenID Connect Provider which easily connects to any existing identity system by
+writing a tiny "bridge" application. It gives absolute control over the user
+interface and user experience flows.
+
+### Ory Oathkeeper: Identity & Access Proxy
+
+[Ory Oathkeeper](https://github.com/ory/oathkeeper) is a BeyondCorp/Zero Trust
+Identity & Access Proxy (IAP) with configurable authentication, authorization,
+and request mutation rules for your web services: Authenticate JWT, Access
+Tokens, API Keys, mTLS; Check if the contained subject is allowed to perform the
+request; Encode resulting content into custom headers (`X-User-ID`), JSON Web
+Tokens and more!
+
+### Ory Keto: Access Control Policies as a Server
+
+[Ory Keto](https://github.com/ory/keto) is a policy decision point. It uses a
+set of access control policies, similar to AWS IAM Policies, in order to
+determine whether a subject (user, application, service, car, ...) is authorized
+to perform a certain action on a resource.
+
+
+
+## Security
+
+Running identity infrastructure requires
+[attention and knowledge of threat models](https://www.ory.sh/kratos/docs/concepts/security).
+
+### Disclosing vulnerabilities
+
+If you think you found a security vulnerability, please refrain from posting it
+publicly on the forums, the chat, or GitHub. You can find all info for
+responsible disclosure in our
+[security.txt](https://www.ory.sh/.well-known/security.txt).
+
+## Telemetry
+
+Ory's services collect summarized, anonymized data that can optionally be turned
+off. Click [here](https://www.ory.sh/docs/ecosystem/sqa) to learn more.
+
+## Documentation
+
+### Guide
+
+The Guide is available [here](https://www.ory.sh/kratos/docs).
+
+### HTTP API documentation
+
+The HTTP API is documented [here](https://www.ory.sh/kratos/docs/sdk/api).
+
+### Upgrading and Changelog
+
+New releases might introduce breaking changes. To help you identify and
+incorporate those changes, we document these changes in the
+[CHANGELOG.md](../CHANGELOG.md). For upgrading, please visit the
+[upgrade guide](https://www.ory.sh/kratos/docs/guides/upgrade).
+
+### Command line documentation
+
+Run kratos -h or
+kratos help.
+
+### Develop
+
+We encourage all contributions and encourage you to read our
+[contribution guidelines](../CONTRIBUTING.md)
+
+#### Dependencies
+
+You need Go 1.16+ and (for the test suites):
+
+- Docker and Docker Compose
+- Makefile
+- NodeJS / npm
+
+It is possible to develop Ory Kratos on Windows, but please be aware that all
+guides assume a Unix shell like bash or zsh.
+
+#### Install from source
+
+
+make install
+
+
+#### Formatting Code
+
+You can format all code using make format. Our
+CI checks if your code is properly formatted.
+
+#### Running Tests
+
+There are three types of tests you can run:
+
+- Short tests (do not require a SQL database like PostgreSQL)
+- Regular tests (do require PostgreSQL, MySQL, CockroachDB)
+- End to end tests (do require databases and will use a test browser)
+
+##### Short Tests
+
+Short tests run fairly quickly. You can either test all of the code at once
+
+```shell script
+go test -short -tags sqlite ./...
+```
+
+or test just a specific module:
+
+```shell script
+cd client; go test -tags sqlite -short .
+```
+
+##### Regular Tests
+
+Regular tests require a database set up. Our test suite is able to work with
+docker directly (using [ory/dockertest](https://github.com/ory/dockertest)) but
+we encourage to use the Makefile instead. Using dockertest can bloat the number
+of Docker Images on your system and are quite slow. Instead we recommend doing:
+
+
+make test
+
+
+Please be aware that make test recreates the
+databases every time you run make test. This
+can be annoying if you are trying to fix something very specific and need the
+database tests all the time. In that case we suggest that you initialize the
+databases with:
+
+
+
+```shell script
+make test-resetdb
+export TEST_DATABASE_MYSQL='mysql://root:secret@(127.0.0.1:3444)/mysql?parseTime=true' # HashiCorpIgnore
+export TEST_DATABASE_POSTGRESQL='postgres://postgres:secret@127.0.0.1:3445/kratos?sslmode=disable' # HashiCorpIgnore
+export TEST_DATABASE_COCKROACHDB='cockroach://root@127.0.0.1:3446/defaultdb?sslmode=disable' # HashiCorpIgnore
+```
+
+
+
+Then you can run `go test` as often as you'd like:
+
+```shell script
+go test -tags sqlite ./...
+
+# or in a module:
+cd client; go test -tags sqlite .
+```
+
+##### Updating Test Fixtures
+
+Some tests use fixtures. If payloads change, you can update them with:
+
+```
+make test-update-snapshots
+```
+
+This will only update the snapshots of the short tests. To update all snapshots,
+run:
+
+```bash
+UPDATE_SNAPSHOTS=true go test -p 4 -tags sqlite ./...
+```
+
+You can also run this command from a sub folder.
+
+##### End-to-End Tests
+
+We use [Cypress](https://www.cypress.io) to run our e2e tests.
+
+⚠️ To run Cypress on ARM based Mac's, at the moment it is
+[necessary to install Rosetta 2](https://www.cypress.io/blog/2021/01/20/running-cypress-on-the-apple-m1-silicon-arm-architecture-using-rosetta-2/).
+To install, use the command -
+`softwareupdate --install-rosetta --agree-to-license`
+
+The simplest way to develop e2e tests is:
+
+
+./test/e2e/run.sh --dev sqlite
+
+
+You can run all tests (with databases) using:
+
+
+make test-e2e
+
+
+For more details, run:
+
+
+./test/e2e/run.sh
+
+
+**Run only a singular test**
+
+Add `.only` to the test you would like to run.
+
+For example:
+
+```ts
+it.only('invalid remote recovery email template', () => {
+ ...
+})
+```
+
+**Run a subset of tests**
+
+This will require editing the `cypress.json` file located in the `test/e2e/`
+folder.
+
+Add the `testFiles` option and specify the test to run inside the
+`cypress/integration` folder. As an example we will add only the `network`
+tests.
+
+```json
+"testFiles": ["profiles/network/*"],
+```
+
+Now start the tests again using the run script or makefile.
+
+#### Build Docker
+
+You can build a development Docker Image using:
+
+
+make docker
+
+
+#### Preview API documentation
+
+- update the SDK including the OpenAPI specification:
+ make sdk
+- run preview server for API documentation: make
+ docs/api
+- run preview server for swagger documentation: make
+ docs/swagger
+
+
diff --git a/kratos.sh b/kratos.sh
new file mode 100755
index 000000000000..243f27ba9d53
--- /dev/null
+++ b/kratos.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -e
+
+echo "Running migration"
+
+# Run migrations if needed
+# Note that we have a DSN string provided via an environment variable though you may pass a config here as well
+kratos migrate sql up -e --yes
+
+echo "Starting auth server"
+
+# Run the auth server via exec so kratos replaces the shell as PID 1
+# and receives container stop signals directly; "$@" forwards any
+# extra container arguments to the server.
+exec kratos serve -c "/home/ory/kratos.yml" --watch-courier "$@"
diff --git a/package-lock.json b/package-lock.json
index eeb56ea1ea73..8ae9d1b421e1 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -102,27 +102,6 @@
}
}
},
- "node_modules/@isaacs/balanced-match": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz",
- "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==",
- "license": "MIT",
- "engines": {
- "node": "20 || >=22"
- }
- },
- "node_modules/@isaacs/brace-expansion": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.1.tgz",
- "integrity": "sha512-WMz71T1JS624nWj2n2fnYAuPovhv7EUhk69R6i9dsVyzxt5eM3bjwvgk9L+APE1TRscGysAVMANkB0jh0LQZrQ==",
- "license": "MIT",
- "dependencies": {
- "@isaacs/balanced-match": "^4.0.1"
- },
- "engines": {
- "node": "20 || >=22"
- }
- },
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
@@ -244,7 +223,6 @@
"resolved": "https://registry.npmjs.org/@nestjs/common/-/common-11.1.10.tgz",
"integrity": "sha512-NoBzJFtq1bzHGia5Q5NO1pJNpx530nupbEu/auCWOFCGL5y8Zo8kiG28EXTCDfIhQgregEtn1Cs6H8WSLUC8kg==",
"license": "MIT",
- "peer": true,
"dependencies": {
"file-type": "21.1.1",
"iterare": "1.2.1",
@@ -537,17 +515,23 @@
"license": "MIT"
},
"node_modules/axios": {
- "version": "1.13.5",
- "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.5.tgz",
- "integrity": "sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==",
- "license": "MIT",
- "peer": true,
+ "version": "1.13.6",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.6.tgz",
+ "integrity": "sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==",
"dependencies": {
"follow-redirects": "^1.15.11",
"form-data": "^4.0.5",
"proxy-from-env": "^1.1.0"
}
},
+ "node_modules/balanced-match": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
+ "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==",
+ "engines": {
+ "node": "18 || 20 || >=22"
+ }
+ },
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
@@ -588,6 +572,17 @@
"readable-stream": "^3.4.0"
}
},
+ "node_modules/brace-expansion": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz",
+ "integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==",
+ "dependencies": {
+ "balanced-match": "^4.0.2"
+ },
+ "engines": {
+ "node": "18 || 20 || >=22"
+ }
+ },
"node_modules/buffer": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
@@ -1816,15 +1811,14 @@
}
},
"node_modules/minimatch": {
- "version": "10.1.1",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
- "integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==",
- "license": "BlueOak-1.0.0",
+ "version": "10.2.4",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.4.tgz",
+ "integrity": "sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==",
"dependencies": {
- "@isaacs/brace-expansion": "^5.0.0"
+ "brace-expansion": "^5.0.2"
},
"engines": {
- "node": "20 || >=22"
+ "node": "18 || 20 || >=22"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -2109,7 +2103,6 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
- "peer": true,
"engines": {
"node": ">=12"
},
@@ -2123,7 +2116,6 @@
"integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==",
"dev": true,
"license": "MIT",
- "peer": true,
"bin": {
"prettier": "bin/prettier.cjs"
},
@@ -2262,8 +2254,7 @@
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz",
"integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==",
- "license": "Apache-2.0",
- "peer": true
+ "license": "Apache-2.0"
},
"node_modules/require-directory": {
"version": "2.1.1",
@@ -2328,7 +2319,6 @@
"resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz",
"integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==",
"license": "Apache-2.0",
- "peer": true,
"dependencies": {
"tslib": "^2.1.0"
}
diff --git a/test/e2e/package-lock.json b/test/e2e/package-lock.json
index eba4842f6ec6..1977f7a32ae9 100644
--- a/test/e2e/package-lock.json
+++ b/test/e2e/package-lock.json
@@ -536,14 +536,13 @@
"license": "MIT"
},
"node_modules/axios": {
- "version": "1.13.2",
- "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz",
- "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==",
+ "version": "1.13.6",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.6.tgz",
+ "integrity": "sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==",
"dev": true,
- "license": "MIT",
"dependencies": {
- "follow-redirects": "^1.15.6",
- "form-data": "^4.0.4",
+ "follow-redirects": "^1.15.11",
+ "form-data": "^4.0.5",
"proxy-from-env": "^1.1.0"
}
},
@@ -2285,11 +2284,10 @@
}
},
"node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz",
+ "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==",
"dev": true,
- "license": "ISC",
"dependencies": {
"brace-expansion": "^1.1.7"
},
@@ -2606,11 +2604,10 @@
}
},
"node_modules/qs": {
- "version": "6.14.1",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz",
- "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==",
+ "version": "6.14.2",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz",
+ "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==",
"dev": true,
- "license": "BSD-3-Clause",
"dependencies": {
"side-channel": "^1.1.0"
},
@@ -3135,11 +3132,10 @@
}
},
"node_modules/validator": {
- "version": "13.15.23",
- "resolved": "https://registry.npmjs.org/validator/-/validator-13.15.23.tgz",
- "integrity": "sha512-4yoz1kEWqUjzi5zsPbAS/903QXSYp0UOtHsPpp7p9rHAw/W+dkInskAE386Fat3oKRROwO98d9ZB0G4cObgUyw==",
+ "version": "13.15.26",
+ "resolved": "https://registry.npmjs.org/validator/-/validator-13.15.26.tgz",
+ "integrity": "sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA==",
"dev": true,
- "license": "MIT",
"engines": {
"node": ">= 0.10"
}
diff --git a/test/e2e/proxy/package-lock.json b/test/e2e/proxy/package-lock.json
index 5f88baf3d3c9..6b0c72f41149 100644
--- a/test/e2e/proxy/package-lock.json
+++ b/test/e2e/proxy/package-lock.json
@@ -866,9 +866,9 @@
}
},
"node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz",
+ "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==",
"dependencies": {
"brace-expansion": "^1.1.7"
},
@@ -2094,9 +2094,9 @@
}
},
"minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz",
+ "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==",
"requires": {
"brace-expansion": "^1.1.7"
}
diff --git a/vendor/github.com/avast/retry-go/v3/.gitignore b/vendor/github.com/avast/retry-go/v3/.gitignore
new file mode 100644
index 000000000000..c40eb23f9501
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/.gitignore
@@ -0,0 +1,21 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# dep
+vendor/
+Gopkg.lock
+
+# cover
+coverage.txt
diff --git a/vendor/github.com/avast/retry-go/v3/.godocdown.tmpl b/vendor/github.com/avast/retry-go/v3/.godocdown.tmpl
new file mode 100644
index 000000000000..6873edf8eb38
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/.godocdown.tmpl
@@ -0,0 +1,37 @@
+# {{ .Name }}
+
+[](https://github.com/avast/retry-go/releases/latest)
+[](LICENSE.md)
+[](https://travis-ci.org/avast/retry-go)
+[](https://ci.appveyor.com/project/JaSei/retry-go)
+[](https://goreportcard.com/report/github.com/avast/retry-go)
+[](http://godoc.org/github.com/avast/retry-go)
+[](https://codecov.io/github/avast/retry-go?branch=master)
+[](https://sourcegraph.com/github.com/avast/retry-go?badge)
+
+{{ .EmitSynopsis }}
+
+{{ .EmitUsage }}
+
+## Contributing
+
+Contributions are very much welcome.
+
+### Makefile
+
+Makefile provides several handy rules, like README.md `generator` , `setup` for prepare build/dev environment, `test`, `cover`, etc...
+
+Try `make help` for more information.
+
+### Before pull request
+
+please try:
+* run tests (`make test`)
+* run linter (`make lint`)
+* if your IDE don't automaticaly do `go fmt`, run `go fmt` (`make fmt`)
+
+### README
+
+README.md are generate from template [.godocdown.tmpl](.godocdown.tmpl) and code documentation via [godocdown](https://github.com/robertkrimen/godocdown).
+
+Never edit README.md direct, because your change will be lost.
diff --git a/vendor/github.com/avast/retry-go/v3/.travis.yml b/vendor/github.com/avast/retry-go/v3/.travis.yml
new file mode 100644
index 000000000000..2b8366a9fd14
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - 1.13
+ - 1.14
+ - 1.15
+
+install:
+ - make setup
+
+script:
+ - make ci
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/avast/retry-go/v3/LICENSE b/vendor/github.com/avast/retry-go/v3/LICENSE
new file mode 100644
index 000000000000..f63fca814f9e
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Avast
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/avast/retry-go/v3/Makefile b/vendor/github.com/avast/retry-go/v3/Makefile
new file mode 100644
index 000000000000..5b8b27e40be4
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/Makefile
@@ -0,0 +1,58 @@
+SOURCE_FILES?=$$(go list ./... | grep -v /vendor/)
+TEST_PATTERN?=.
+TEST_OPTIONS?=
+VERSION?=$$(cat VERSION)
+LINTER?=$$(which golangci-lint)
+LINTER_VERSION=1.15.0
+
+ifeq ($(OS),Windows_NT)
+ LINTER_FILE=golangci-lint-$(LINTER_VERSION)-windows-amd64.zip
+ LINTER_UNPACK= >| app.zip; unzip -j app.zip -d $$GOPATH/bin; rm app.zip
+else ifeq ($(OS), Darwin)
+ LINTER_FILE=golangci-lint-$(LINTER_VERSION)-darwin-amd64.tar.gz
+ LINTER_UNPACK= | tar xzf - -C $$GOPATH/bin --wildcards --strip 1 "**/golangci-lint"
+else
+ LINTER_FILE=golangci-lint-$(LINTER_VERSION)-linux-amd64.tar.gz
+ LINTER_UNPACK= | tar xzf - -C $$GOPATH/bin --wildcards --strip 1 "**/golangci-lint"
+endif
+
+setup:
+ go get -u github.com/pierrre/gotestcover
+ go get -u golang.org/x/tools/cmd/cover
+ go get -u github.com/robertkrimen/godocdown/godocdown
+ @if [ "$(LINTER)" = "" ]; then\
+ curl -L https://github.com/golangci/golangci-lint/releases/download/v$(LINTER_VERSION)/$(LINTER_FILE) $(LINTER_UNPACK) ;\
+ chmod +x $$GOPATH/bin/golangci-lint;\
+ fi
+ go mod download
+
+generate: ## Generate README.md
+ godocdown >| README.md
+
+test: generate test_and_cover_report lint
+
+test_and_cover_report:
+ gotestcover $(TEST_OPTIONS) -covermode=atomic -coverprofile=coverage.txt $(SOURCE_FILES) -run $(TEST_PATTERN) -timeout=2m
+
+cover: test ## Run all the tests and opens the coverage report
+ go tool cover -html=coverage.txt
+
+fmt: ## gofmt and goimports all go files
+ find . -name '*.go' -not -wholename './vendor/*' | while read -r file; do gofmt -w -s "$$file"; goimports -w "$$file"; done
+
+lint: ## Run all the linters
+ golangci-lint run
+
+ci: test_and_cover_report ## Run all the tests but no linters - use https://golangci.com integration instead
+
+build:
+ go build
+
+release: ## Release new version
+ git tag | grep -q $(VERSION) && echo This version was released! Increase VERSION! || git tag $(VERSION) && git push origin $(VERSION) && git tag v$(VERSION) && git push origin v$(VERSION)
+
+# Absolutely awesome: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+help:
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+.DEFAULT_GOAL := build
diff --git a/vendor/github.com/avast/retry-go/v3/README.md b/vendor/github.com/avast/retry-go/v3/README.md
new file mode 100644
index 000000000000..5431b4a86498
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/README.md
@@ -0,0 +1,348 @@
+# retry
+
+[](https://github.com/avast/retry-go/releases/latest)
+[](LICENSE.md)
+[](https://travis-ci.org/avast/retry-go)
+[](https://ci.appveyor.com/project/JaSei/retry-go)
+[](https://goreportcard.com/report/github.com/avast/retry-go)
+[](http://godoc.org/github.com/avast/retry-go)
+[](https://codecov.io/github/avast/retry-go?branch=master)
+[](https://sourcegraph.com/github.com/avast/retry-go?badge)
+
+Simple library for retry mechanism
+
+slightly inspired by
+[Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry)
+
+
+### SYNOPSIS
+
+http get with retry:
+
+ url := "http://example.com"
+ var body []byte
+
+ err := retry.Do(
+ func() error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ },
+ )
+
+ fmt.Println(body)
+
+[next examples](https://github.com/avast/retry-go/tree/master/examples)
+
+
+### SEE ALSO
+
+* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly
+complicated interface.
+
+* [sethgrid/pester](https://github.com/sethgrid/pester) - only http retry for
+http calls with retries and backoff
+
+* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the
+exponential backoff algorithm from Google's HTTP Client Library for Java. Really
+complicated interface.
+
+* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good,
+slightly similar as this package, don't have 'simple' `Retry` method
+
+* [matryer/try](https://github.com/matryer/try) - very popular package,
+nonintuitive interface (for me)
+
+
+### BREAKING CHANGES
+
+3.0.0
+
+* `DelayTypeFunc` accepts a new parameter `err` - this breaking change affects
+only your custom Delay Functions. This change allow [make delay functions based
+on error](examples/delay_based_on_error_test.go).
+
+1.0.2 -> 2.0.0
+
+* argument of `retry.Delay` is final delay (no multiplication by `retry.Units`
+anymore)
+
+* function `retry.Units` are removed
+
+* [more about this breaking change](https://github.com/avast/retry-go/issues/7)
+
+0.3.0 -> 1.0.0
+
+* `retry.Retry` function are changed to `retry.Do` function
+
+* `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are
+now implement via functions produces Options (aka `retry.OnRetry`)
+
+## Usage
+
+#### func BackOffDelay
+
+```go
+func BackOffDelay(n uint, _ error, config *Config) time.Duration
+```
+BackOffDelay is a DelayType which increases delay between consecutive retries
+
+#### func Do
+
+```go
+func Do(retryableFunc RetryableFunc, opts ...Option) error
+```
+
+#### func FixedDelay
+
+```go
+func FixedDelay(_ uint, _ error, config *Config) time.Duration
+```
+FixedDelay is a DelayType which keeps delay the same through all iterations
+
+#### func IsRecoverable
+
+```go
+func IsRecoverable(err error) bool
+```
+IsRecoverable checks if error is an instance of `unrecoverableError`
+
+#### func RandomDelay
+
+```go
+func RandomDelay(_ uint, _ error, config *Config) time.Duration
+```
+RandomDelay is a DelayType which picks a random delay up to config.maxJitter
+
+#### func Unrecoverable
+
+```go
+func Unrecoverable(err error) error
+```
+Unrecoverable wraps an error in `unrecoverableError` struct
+
+#### type Config
+
+```go
+type Config struct {
+}
+```
+
+
+#### type DelayTypeFunc
+
+```go
+type DelayTypeFunc func(n uint, err error, config *Config) time.Duration
+```
+
+DelayTypeFunc is called to return the next delay to wait after the retriable
+function fails on `err` after `n` attempts.
+
+#### func CombineDelay
+
+```go
+func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc
+```
+CombineDelay is a DelayType the combines all of the specified delays into a new
+DelayTypeFunc
+
+#### type Error
+
+```go
+type Error []error
+```
+
+Error type represents list of errors in retry
+
+#### func (Error) Error
+
+```go
+func (e Error) Error() string
+```
+Error method return string representation of Error It is an implementation of
+error interface
+
+#### func (Error) WrappedErrors
+
+```go
+func (e Error) WrappedErrors() []error
+```
+WrappedErrors returns the list of errors that this Error is wrapping. It is an
+implementation of the `errwrap.Wrapper` interface in package
+[errwrap](https://github.com/hashicorp/errwrap) so that `retry.Error` can be
+used with that library.
+
+#### type OnRetryFunc
+
+```go
+type OnRetryFunc func(n uint, err error)
+```
+
+Function signature of OnRetry function n = count of attempts
+
+#### type Option
+
+```go
+type Option func(*Config)
+```
+
+Option represents an option for retry.
+
+#### func Attempts
+
+```go
+func Attempts(attempts uint) Option
+```
+Attempts set count of retry default is 10
+
+#### func Context
+
+```go
+func Context(ctx context.Context) Option
+```
+Context allow to set context of retry default are Background context
+
+example of immediately cancellation (maybe it isn't the best example, but it
+describes behavior enough; I hope)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ retry.Do(
+ func() error {
+ ...
+ },
+ retry.Context(ctx),
+ )
+
+#### func Delay
+
+```go
+func Delay(delay time.Duration) Option
+```
+Delay set delay between retry default is 100ms
+
+#### func DelayType
+
+```go
+func DelayType(delayType DelayTypeFunc) Option
+```
+DelayType set type of the delay between retries default is BackOff
+
+#### func LastErrorOnly
+
+```go
+func LastErrorOnly(lastErrorOnly bool) Option
+```
+return the direct last error that came from the retried function default is
+false (return wrapped errors with everything)
+
+#### func MaxDelay
+
+```go
+func MaxDelay(maxDelay time.Duration) Option
+```
+MaxDelay set maximum delay between retry does not apply by default
+
+#### func MaxJitter
+
+```go
+func MaxJitter(maxJitter time.Duration) Option
+```
+MaxJitter sets the maximum random Jitter between retries for RandomDelay
+
+#### func OnRetry
+
+```go
+func OnRetry(onRetry OnRetryFunc) Option
+```
+OnRetry function callback are called each retry
+
+log each retry example:
+
+ retry.Do(
+ func() error {
+ return errors.New("some error")
+ },
+ retry.OnRetry(func(n uint, err error) {
+ log.Printf("#%d: %s\n", n, err)
+ }),
+ )
+
+#### func RetryIf
+
+```go
+func RetryIf(retryIf RetryIfFunc) Option
+```
+RetryIf controls whether a retry should be attempted after an error (assuming
+there are any retry attempts remaining)
+
+skip retry if special error example:
+
+ retry.Do(
+ func() error {
+ return errors.New("special error")
+ },
+ retry.RetryIf(func(err error) bool {
+ if err.Error() == "special error" {
+ return false
+ }
+ return true
+ })
+ )
+
+By default RetryIf stops execution if the error is wrapped using
+`retry.Unrecoverable`, so above example may also be shortened to:
+
+ retry.Do(
+ func() error {
+ return retry.Unrecoverable(errors.New("special error"))
+ }
+ )
+
+#### type RetryIfFunc
+
+```go
+type RetryIfFunc func(error) bool
+```
+
+Function signature of retry if function
+
+#### type RetryableFunc
+
+```go
+type RetryableFunc func() error
+```
+
+Function signature of retryable function
+
+## Contributing
+
+Contributions are very much welcome.
+
+### Makefile
+
+Makefile provides several handy rules, like README.md `generator` , `setup` for prepare build/dev environment, `test`, `cover`, etc...
+
+Try `make help` for more information.
+
+### Before pull request
+
+please try:
+* run tests (`make test`)
+* run linter (`make lint`)
+* if your IDE don't automaticaly do `go fmt`, run `go fmt` (`make fmt`)
+
+### README
+
+README.md are generate from template [.godocdown.tmpl](.godocdown.tmpl) and code documentation via [godocdown](https://github.com/robertkrimen/godocdown).
+
+Never edit README.md direct, because your change will be lost.
diff --git a/vendor/github.com/avast/retry-go/v3/VERSION b/vendor/github.com/avast/retry-go/v3/VERSION
new file mode 100644
index 000000000000..fd2a01863fdd
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/VERSION
@@ -0,0 +1 @@
+3.1.0
diff --git a/vendor/github.com/avast/retry-go/v3/appveyor.yml b/vendor/github.com/avast/retry-go/v3/appveyor.yml
new file mode 100644
index 000000000000..dc5234ac8684
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/appveyor.yml
@@ -0,0 +1,19 @@
+version: "{build}"
+
+clone_folder: c:\Users\appveyor\go\src\github.com\avast\retry-go
+
+#os: Windows Server 2012 R2
+platform: x64
+
+install:
+ - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
+ - set GOPATH=C:\Users\appveyor\go
+ - set PATH=%PATH%;c:\MinGW\bin
+ - set PATH=%PATH%;%GOPATH%\bin;c:\go\bin
+ - set GOBIN=%GOPATH%\bin
+ - go version
+ - go env
+ - make setup
+
+build_script:
+ - make ci
diff --git a/vendor/github.com/avast/retry-go/v3/options.go b/vendor/github.com/avast/retry-go/v3/options.go
new file mode 100644
index 000000000000..a6c57207c7e9
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/options.go
@@ -0,0 +1,198 @@
+package retry
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "time"
+)
+
+// Function signature of retry if function
+type RetryIfFunc func(error) bool
+
+// Function signature of OnRetry function
+// n = count of attempts
+type OnRetryFunc func(n uint, err error)
+
+// DelayTypeFunc is called to return the next delay to wait after the retriable function fails on `err` after `n` attempts.
+type DelayTypeFunc func(n uint, err error, config *Config) time.Duration
+
+type Config struct {
+ attempts uint
+ delay time.Duration
+ maxDelay time.Duration
+ maxJitter time.Duration
+ onRetry OnRetryFunc
+ retryIf RetryIfFunc
+ delayType DelayTypeFunc
+ lastErrorOnly bool
+ context context.Context
+
+ maxBackOffN uint
+}
+
+// Option represents an option for retry.
+type Option func(*Config)
+
+// return the direct last error that came from the retried function
+// default is false (return wrapped errors with everything)
+func LastErrorOnly(lastErrorOnly bool) Option {
+ return func(c *Config) {
+ c.lastErrorOnly = lastErrorOnly
+ }
+}
+
+// Attempts set count of retry
+// default is 10
+func Attempts(attempts uint) Option {
+ return func(c *Config) {
+ c.attempts = attempts
+ }
+}
+
+// Delay set delay between retry
+// default is 100ms
+func Delay(delay time.Duration) Option {
+ return func(c *Config) {
+ c.delay = delay
+ }
+}
+
+// MaxDelay set maximum delay between retry
+// does not apply by default
+func MaxDelay(maxDelay time.Duration) Option {
+ return func(c *Config) {
+ c.maxDelay = maxDelay
+ }
+}
+
+// MaxJitter sets the maximum random Jitter between retries for RandomDelay
+func MaxJitter(maxJitter time.Duration) Option {
+ return func(c *Config) {
+ c.maxJitter = maxJitter
+ }
+}
+
+// DelayType set type of the delay between retries
+// default is BackOff
+func DelayType(delayType DelayTypeFunc) Option {
+ return func(c *Config) {
+ c.delayType = delayType
+ }
+}
+
+// BackOffDelay is a DelayType which increases delay between consecutive retries
+func BackOffDelay(n uint, _ error, config *Config) time.Duration {
+ // 1 << 63 would overflow signed int64 (time.Duration), thus 62.
+ const max uint = 62
+
+ if config.maxBackOffN == 0 {
+ if config.delay <= 0 {
+ config.delay = 1
+ }
+
+ config.maxBackOffN = max - uint(math.Floor(math.Log2(float64(config.delay))))
+ }
+
+ if n > config.maxBackOffN {
+ n = config.maxBackOffN
+ }
+
+ return config.delay << n
+}
+
+// FixedDelay is a DelayType which keeps delay the same through all iterations
+func FixedDelay(_ uint, _ error, config *Config) time.Duration {
+ return config.delay
+}
+
+// RandomDelay is a DelayType which picks a random delay up to config.maxJitter
+func RandomDelay(_ uint, _ error, config *Config) time.Duration {
+ return time.Duration(rand.Int63n(int64(config.maxJitter)))
+}
+
+// CombineDelay is a DelayType the combines all of the specified delays into a new DelayTypeFunc
+func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc {
+ const maxInt64 = uint64(math.MaxInt64)
+
+ return func(n uint, err error, config *Config) time.Duration {
+ var total uint64
+ for _, delay := range delays {
+ total += uint64(delay(n, err, config))
+ if total > maxInt64 {
+ total = maxInt64
+ }
+ }
+
+ return time.Duration(total)
+ }
+}
+
+// OnRetry function callback are called each retry
+//
+// log each retry example:
+//
+// retry.Do(
+// func() error {
+// return errors.New("some error")
+// },
+// retry.OnRetry(func(n uint, err error) {
+// log.Printf("#%d: %s\n", n, err)
+// }),
+// )
+func OnRetry(onRetry OnRetryFunc) Option {
+ return func(c *Config) {
+ c.onRetry = onRetry
+ }
+}
+
+// RetryIf controls whether a retry should be attempted after an error
+// (assuming there are any retry attempts remaining)
+//
+// skip retry if special error example:
+//
+// retry.Do(
+// func() error {
+// return errors.New("special error")
+// },
+// retry.RetryIf(func(err error) bool {
+// if err.Error() == "special error" {
+// return false
+// }
+// return true
+// })
+// )
+//
+// By default RetryIf stops execution if the error is wrapped using `retry.Unrecoverable`,
+// so above example may also be shortened to:
+//
+// retry.Do(
+// func() error {
+// return retry.Unrecoverable(errors.New("special error"))
+// }
+// )
+func RetryIf(retryIf RetryIfFunc) Option {
+ return func(c *Config) {
+ c.retryIf = retryIf
+ }
+}
+
+// Context allow to set context of retry
+// default are Background context
+//
+// example of immediately cancellation (maybe it isn't the best example, but it describes behavior enough; I hope)
+//
+// ctx, cancel := context.WithCancel(context.Background())
+// cancel()
+//
+// retry.Do(
+// func() error {
+// ...
+// },
+// retry.Context(ctx),
+// )
+func Context(ctx context.Context) Option {
+ return func(c *Config) {
+ c.context = ctx
+ }
+}
diff --git a/vendor/github.com/avast/retry-go/v3/retry.go b/vendor/github.com/avast/retry-go/v3/retry.go
new file mode 100644
index 000000000000..b5ef9f016677
--- /dev/null
+++ b/vendor/github.com/avast/retry-go/v3/retry.go
@@ -0,0 +1,218 @@
+/*
+Simple library for retry mechanism
+
+slightly inspired by [Try::Tiny::Retry](https://metacpan.org/pod/Try::Tiny::Retry)
+
+SYNOPSIS
+
+http get with retry:
+
+ url := "http://example.com"
+ var body []byte
+
+ err := retry.Do(
+ func() error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ },
+ )
+
+ fmt.Println(body)
+
+[next examples](https://github.com/avast/retry-go/tree/master/examples)
+
+
+SEE ALSO
+
+* [giantswarm/retry-go](https://github.com/giantswarm/retry-go) - slightly complicated interface.
+
+* [sethgrid/pester](https://github.com/sethgrid/pester) - only http retry for http calls with retries and backoff
+
+* [cenkalti/backoff](https://github.com/cenkalti/backoff) - Go port of the exponential backoff algorithm from Google's HTTP Client Library for Java. Really complicated interface.
+
+* [rafaeljesus/retry-go](https://github.com/rafaeljesus/retry-go) - looks good, slightly similar as this package, don't have 'simple' `Retry` method
+
+* [matryer/try](https://github.com/matryer/try) - very popular package, nonintuitive interface (for me)
+
+
+BREAKING CHANGES
+
+3.0.0
+
+* `DelayTypeFunc` accepts a new parameter `err` - this breaking change affects only your custom Delay Functions. This change allow [make delay functions based on error](examples/delay_based_on_error_test.go).
+
+
+1.0.2 -> 2.0.0
+
+* argument of `retry.Delay` is final delay (no multiplication by `retry.Units` anymore)
+
+* function `retry.Units` are removed
+
+* [more about this breaking change](https://github.com/avast/retry-go/issues/7)
+
+
+0.3.0 -> 1.0.0
+
+* `retry.Retry` function are changed to `retry.Do` function
+
+* `retry.RetryCustom` (OnRetry) and `retry.RetryCustomWithOpts` functions are now implement via functions produces Options (aka `retry.OnRetry`)
+
+
+*/
+package retry
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// Function signature of retryable function
+type RetryableFunc func() error
+
+func Do(retryableFunc RetryableFunc, opts ...Option) error {
+ var n uint
+
+ //default
+ config := newDefaultRetryConfig()
+
+ //apply opts
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if err := config.context.Err(); err != nil {
+ return err
+ }
+
+ var errorLog Error
+ if !config.lastErrorOnly {
+ errorLog = make(Error, config.attempts)
+ } else {
+ errorLog = make(Error, 1)
+ }
+
+ lastErrIndex := n
+ for n < config.attempts {
+ err := retryableFunc()
+
+ if err != nil {
+ errorLog[lastErrIndex] = unpackUnrecoverable(err)
+
+ if !config.retryIf(err) {
+ break
+ }
+
+ config.onRetry(n, err)
+
+ // if this is last attempt - don't wait
+ if n == config.attempts-1 {
+ break
+ }
+
+ delayTime := config.delayType(n, err, config)
+ if config.maxDelay > 0 && delayTime > config.maxDelay {
+ delayTime = config.maxDelay
+ }
+
+ select {
+ case <-time.After(delayTime):
+ case <-config.context.Done():
+ return config.context.Err()
+ }
+
+ } else {
+ return nil
+ }
+
+ n++
+ if !config.lastErrorOnly {
+ lastErrIndex = n
+ }
+ }
+
+ if config.lastErrorOnly {
+ return errorLog[lastErrIndex]
+ }
+ return errorLog
+}
+
+func newDefaultRetryConfig() *Config {
+ return &Config{
+ attempts: uint(10),
+ delay: 100 * time.Millisecond,
+ maxJitter: 100 * time.Millisecond,
+ onRetry: func(n uint, err error) {},
+ retryIf: IsRecoverable,
+ delayType: CombineDelay(BackOffDelay, RandomDelay),
+ lastErrorOnly: false,
+ context: context.Background(),
+ }
+}
+
+// Error type represents list of errors in retry
+type Error []error
+
+// Error method return string representation of Error
+// It is an implementation of error interface
+func (e Error) Error() string {
+ logWithNumber := make([]string, lenWithoutNil(e))
+ for i, l := range e {
+ if l != nil {
+ logWithNumber[i] = fmt.Sprintf("#%d: %s", i+1, l.Error())
+ }
+ }
+
+ return fmt.Sprintf("All attempts fail:\n%s", strings.Join(logWithNumber, "\n"))
+}
+
+func lenWithoutNil(e Error) (count int) {
+ for _, v := range e {
+ if v != nil {
+ count++
+ }
+ }
+
+ return
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the `errwrap.Wrapper` interface
+// in package [errwrap](https://github.com/hashicorp/errwrap) so that
+// `retry.Error` can be used with that library.
+func (e Error) WrappedErrors() []error {
+ return e
+}
+
+type unrecoverableError struct {
+ error
+}
+
+// Unrecoverable wraps an error in `unrecoverableError` struct
+func Unrecoverable(err error) error {
+ return unrecoverableError{err}
+}
+
+// IsRecoverable checks if error is an instance of `unrecoverableError`
+func IsRecoverable(err error) bool {
+ _, isUnrecoverable := err.(unrecoverableError)
+ return !isUnrecoverable
+}
+
+func unpackUnrecoverable(err error) error {
+ if unrecoverable, isUnrecoverable := err.(unrecoverableError); isUnrecoverable {
+ return unrecoverable.error
+ }
+
+ return err
+}
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml
new file mode 100644
index 000000000000..8a0681af8559
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+ "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+ [analyzers.meta]
+ import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
new file mode 100644
index 000000000000..529c3412ba95
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 000000000000..d324c43ba4df
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+arch:
+ - amd64
+ - ppc64le
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - go test -race -v ./...
+after_script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000000..469b44907a09
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md
new file mode 100644
index 000000000000..0a1ff9f94d85
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+- Stack trace (Traceback)
+- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+- Possibly your input and the output
+- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
+
+
+## Attribution
+This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 000000000000..686680298da2
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 000000000000..ffbbb62c7044
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,242 @@
+# Mergo
+
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA status][13]][14]
+
+[![GoDoc][3]][4]
+[![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
+
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
+[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
+[20]: https://bestpractices.coreinfrastructure.org/projects/7177
+[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
+[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+### Important note
+
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+
+
+
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+
+## Install
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge overwriting values using the transformer `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
+
+Here is a nice example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if test are failing due missing package, please execute:
+
+ go get gopkg.in/yaml.v3
+
+### Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+## Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md
new file mode 100644
index 000000000000..a5de61f77ba7
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.3.x | :white_check_mark: |
+| < 0.3 | :x: |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 000000000000..fcd985f995dc
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,143 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+Install
+
+Do your usual installation procedure:
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ )
+
+ type Foo struct {
+ A string
+ B int64
+ }
+
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+ }
+
+Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
+ }
+
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
+
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+ }
+
+Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+About
+
+Written by Dario Castañé: https://da.rio.hn
+
+License
+
+BSD 3-Clause license, as Go language.
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 000000000000..b50d5c2a4e7c
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,178 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+// changeInitialCase returns s with its first rune transformed by mapper
+// (e.g. unicode.ToLower / unicode.ToUpper). The rest of the string is
+// left untouched; an empty string is returned as-is.
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	// Decode the first rune so multi-byte UTF-8 initials are handled correctly.
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
+// isExported reports whether the struct field is exported.
+// NOTE(review): this only checks for an ASCII uppercase initial ('A'-'Z'),
+// so fields whose names start with a non-ASCII uppercase letter are treated
+// as unexported here, even though changeInitialCase above is Unicode-aware.
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	// Cycle guard: if this (address, type) pair has already been visited on
+	// this traversal, stop here to avoid infinite recursion on cyclic values.
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{typ, seen, addr}
+	}
+	zeroValue := reflect.Value{}
+	switch dst.Kind() {
+	case reflect.Map:
+		// struct -> map direction: copy each exported src field into the map
+		// under its lowerCamelCase name.
+		dstMap := dst.Interface().(map[string]interface{})
+		for i, n := 0, src.NumField(); i < n; i++ {
+			srcType := src.Type()
+			field := srcType.Field(i)
+			if !isExported(field) {
+				continue
+			}
+			fieldName := field.Name
+			fieldName = changeInitialCase(fieldName, unicode.ToLower)
+			// Only set the key if it is missing, its value is empty, or
+			// overwriting was requested.
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+				dstMap[fieldName] = src.Field(i).Interface()
+			}
+		}
+	case reflect.Ptr:
+		// Allocate the pointee if needed, then fall through and treat the
+		// dereferenced value as the destination struct.
+		if dst.IsNil() {
+			v := reflect.New(dst.Type().Elem())
+			dst.Set(v)
+		}
+		dst = dst.Elem()
+		fallthrough
+	case reflect.Struct:
+		// map -> struct direction: map keys are matched to struct fields by
+		// UpperCamelCasing the key; keys with no matching field are skipped.
+		srcMap := src.Interface().(map[string]interface{})
+		for key := range srcMap {
+			// NOTE(review): this mutates the shared config for the rest of
+			// the traversal, not just this key — preserved as-is (vendored).
+			config.overwriteWithEmptyValue = true
+			srcValue := srcMap[key]
+			fieldName := changeInitialCase(key, unicode.ToUpper)
+			dstElement := dst.FieldByName(fieldName)
+			if dstElement == zeroValue {
+				// We discard it because the field doesn't exist.
+				continue
+			}
+			srcElement := reflect.ValueOf(srcValue)
+			dstKind := dstElement.Kind()
+			srcKind := srcElement.Kind()
+			// Reconcile pointer-ness between the map value and the field.
+			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+				srcElement = srcElement.Elem()
+				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+			} else if dstKind == reflect.Ptr {
+				// Can this work? I guess it can't.
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcPtr := srcElement.Addr()
+					srcElement = reflect.ValueOf(srcPtr)
+					srcKind = reflect.Ptr
+				}
+			}
+
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if srcKind == reflect.Map {
+				// Nested map value vs. non-map field kind: recurse as a map.
+				if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will do recursively
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is kept as a separate method from Merge because it is cleaner and it
+// keeps sane semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+//
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, append(opts, WithOverride)...)
+}
+
+// _map is the shared implementation behind Map and MapWithOverwrite.
+// It validates the arguments, applies the functional options, and then
+// dispatches either to deepMerge (same-kind arguments) or deepMap
+// (struct<->map mapping).
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+		return ErrNonPointerArgument
+	}
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	config := &Config{}
+
+	// Apply caller-supplied options (WithOverride, WithTransformers, ...).
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be friction-less, we redirect equal-type arguments
+	// to deepMerge. Only because arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+	}
+	// Otherwise the pair must be exactly struct->map or map->struct.
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 000000000000..0ef9b2138c15
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,409 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// hasMergeableFields reports whether the struct value has at least one
+// exported field, looking recursively through anonymous (embedded) struct
+// fields. Structs with no exported fields are assigned wholesale instead
+// of being merged field by field.
+func hasMergeableFields(dst reflect.Value) (exported bool) {
+	for i, n := 0, dst.NumField(); i < n; i++ {
+		field := dst.Type().Field(i)
+		if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+			exported = exported || hasMergeableFields(dst.Field(i))
+		} else if isExportedComponent(&field) {
+			exported = exported || len(field.PkgPath) == 0
+		}
+	}
+	return
+}
+
+// isExportedComponent reports whether the struct field is exported.
+// A non-empty PkgPath means the field is unexported (per the reflect
+// package contract); as a second check, names starting with a lowercase
+// ASCII letter or '_' are also treated as unexported.
+func isExportedComponent(field *reflect.StructField) bool {
+	pkgPath := field.PkgPath
+	if len(pkgPath) > 0 {
+		return false
+	}
+	c := field.Name[0]
+	if 'a' <= c && c <= 'z' || c == '_' {
+		return false
+	}
+	return true
+}
+
+// Config collects the options that control how Merge and Map behave.
+// Exported fields can be set directly; unexported ones are toggled through
+// the With* functional options below.
+type Config struct {
+	// Transformers, when set, customizes how specific types are merged.
+	Transformers Transformers
+	// Overwrite makes non-empty dst attributes be overridden by non-empty src values.
+	Overwrite bool
+	// ShouldNotDereference stops pointer dereferencing in emptiness checks.
+	ShouldNotDereference bool
+	// AppendSlice appends src slices to dst slices instead of replacing them.
+	AppendSlice bool
+	// TypeCheck makes slice overwrites fail on mismatched slice types.
+	TypeCheck bool
+	overwriteWithEmptyValue      bool
+	overwriteSliceWithEmptyValue bool
+	sliceDeepCopy                bool
+	debug                        bool
+}
+
+// Transformers is implemented by callers that want custom merge behavior
+// for particular types. Transformer returns a merge function for the given
+// type, or nil to fall back to the default behavior.
+type Transformers interface {
+	Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	typeCheck := config.TypeCheck
+	overwriteWithEmptySrc := config.overwriteWithEmptyValue
+	overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+	sliceDeepCopy := config.sliceDeepCopy
+
+	if !src.IsValid() {
+		return
+	}
+	// Cycle guard: skip (address, type) pairs already seen on this traversal.
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{typ, seen, addr}
+	}
+
+	// A registered transformer for dst's type takes over entirely.
+	if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
+		if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+			err = fn(dst, src)
+			return
+		}
+	}
+
+	switch dst.Kind() {
+	case reflect.Struct:
+		if hasMergeableFields(dst) {
+			// Merge field by field; src is assumed to have the same layout.
+			for i, n := 0, dst.NumField(); i < n; i++ {
+				if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+					return
+				}
+			}
+		} else {
+			// No exported fields: assign the whole value when allowed.
+			if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
+				dst.Set(src)
+			}
+		}
+	case reflect.Map:
+		if dst.IsNil() && !src.IsNil() {
+			if dst.CanSet() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			} else {
+				dst = src
+				return
+			}
+		}
+
+		if src.Kind() != reflect.Map {
+			if overwrite && dst.CanSet() {
+				dst.Set(src)
+			}
+			return
+		}
+
+		for _, key := range src.MapKeys() {
+			srcElement := src.MapIndex(key)
+			if !srcElement.IsValid() {
+				continue
+			}
+			dstElement := dst.MapIndex(key)
+			switch srcElement.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+				// Nilable src entry: only propagate the nil when overwriting.
+				if srcElement.IsNil() {
+					if overwrite {
+						dst.SetMapIndex(key, srcElement)
+					}
+					continue
+				}
+				fallthrough
+			default:
+				if !srcElement.CanInterface() {
+					continue
+				}
+				// Dispatch on the element's dynamic (unwrapped) kind.
+				switch reflect.TypeOf(srcElement.Interface()).Kind() {
+				case reflect.Struct:
+					fallthrough
+				case reflect.Ptr:
+					fallthrough
+				case reflect.Map:
+					srcMapElm := srcElement
+					dstMapElm := dstElement
+					if srcMapElm.CanInterface() {
+						srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+						if dstMapElm.IsValid() {
+							dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+						}
+					}
+					if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+						return
+					}
+				case reflect.Slice:
+					srcSlice := reflect.ValueOf(srcElement.Interface())
+
+					var dstSlice reflect.Value
+					if !dstElement.IsValid() || dstElement.IsNil() {
+						dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+					} else {
+						dstSlice = reflect.ValueOf(dstElement.Interface())
+					}
+
+					// Replace, append, or deep-copy the slice depending on config.
+					if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+						if typeCheck && srcSlice.Type() != dstSlice.Type() {
+							return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+						}
+						dstSlice = srcSlice
+					} else if config.AppendSlice {
+						if srcSlice.Type() != dstSlice.Type() {
+							return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+						}
+						dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+					} else if sliceDeepCopy {
+						// Element-wise merge up to the shorter slice length.
+						i := 0
+						for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+							srcElement := srcSlice.Index(i)
+							dstElement := dstSlice.Index(i)
+
+							if srcElement.CanInterface() {
+								srcElement = reflect.ValueOf(srcElement.Interface())
+							}
+							if dstElement.CanInterface() {
+								dstElement = reflect.ValueOf(dstElement.Interface())
+							}
+
+							if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+								return
+							}
+						}
+
+					}
+					dst.SetMapIndex(key, dstSlice)
+				}
+			}
+
+			// Non-empty dst entries of slice kind (or map-over-map) were
+			// already handled above; don't overwrite them below.
+			if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+				if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+					continue
+				}
+				if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+					continue
+				}
+			}
+
+			if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
+				if dst.IsNil() {
+					dst.Set(reflect.MakeMap(dst.Type()))
+				}
+				dst.SetMapIndex(key, srcElement)
+			}
+		}
+
+		// Ensure that all keys in dst are deleted if they are not in src.
+		if overwriteWithEmptySrc {
+			for _, key := range dst.MapKeys() {
+				srcElement := src.MapIndex(key)
+				if !srcElement.IsValid() {
+					dst.SetMapIndex(key, reflect.Value{})
+				}
+			}
+		}
+	case reflect.Slice:
+		if !dst.CanSet() {
+			break
+		}
+		if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+			dst.Set(src)
+		} else if config.AppendSlice {
+			if src.Type() != dst.Type() {
+				return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+			}
+			dst.Set(reflect.AppendSlice(dst, src))
+		} else if sliceDeepCopy {
+			for i := 0; i < src.Len() && i < dst.Len(); i++ {
+				srcElement := src.Index(i)
+				dstElement := dst.Index(i)
+				if srcElement.CanInterface() {
+					srcElement = reflect.ValueOf(srcElement.Interface())
+				}
+				if dstElement.CanInterface() {
+					dstElement = reflect.ValueOf(dstElement.Interface())
+				}
+
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			}
+		}
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Interface:
+		if isReflectNil(src) {
+			if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+				dst.Set(src)
+			}
+			break
+		}
+
+		if src.Kind() != reflect.Interface {
+			// dst is a pointer (or interface) while src is concrete.
+			if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+				if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+					dst.Set(src)
+				}
+			} else if src.Kind() == reflect.Ptr {
+				if !config.ShouldNotDereference {
+					if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+						return
+					}
+				} else {
+					// Pointers are compared as opaque values, not dereferenced.
+					if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+						dst.Set(src)
+					}
+				}
+			} else if dst.Elem().Type() == src.Type() {
+				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return ErrDifferentArgumentsTypes
+			}
+			break
+		}
+
+		// Both dst and src are interfaces from here on.
+		if dst.IsNil() || overwrite {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+				dst.Set(src)
+			}
+			break
+		}
+
+		if dst.Elem().Kind() == src.Elem().Kind() {
+			if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+				return
+			}
+			break
+		}
+	default:
+		// Scalar kinds: plain conditional assignment.
+		mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
+		if mustSet {
+			if dst.CanSet() {
+				dst.Set(src)
+			} else {
+				dst = src
+			}
+		}
+	}
+
+	return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will recurse into any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+//
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing customization of how some types are merged.
+func WithTransformers(transformers Transformers) func(*Config) {
+	return func(config *Config) {
+		config.Transformers = transformers
+	}
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non-empty dst attributes with empty src attribute values.
+// Note that it also implies WithOverride.
+func WithOverwriteWithEmptyValue(config *Config) {
+	config.Overwrite = true
+	config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slices with empty src slices.
+func WithOverrideEmptySlice(config *Config) {
+	config.overwriteSliceWithEmptyValue = true
+}
+
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+	config.ShouldNotDereference = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check slice types while overwriting (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+	config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slices element by element. Note that it also
+// implies WithOverride.
+func WithSliceDeepCopy(config *Config) {
+	config.sliceDeepCopy = true
+	config.Overwrite = true
+}
+
+// merge is the shared implementation behind Merge and MergeWithOverwrite.
+// It validates that dst is a pointer, that dst and src resolve to the same
+// type, applies the options, and delegates to deepMerge.
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+	if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+		return ErrNonPointerArgument
+	}
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &Config{}
+
+	// Apply caller-supplied options (WithOverride, WithTransformers, ...).
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// isReflectNil reports whether v holds nil for the kinds that can be nil
+// (interface, slice, chan, func, map, pointer). Any other kind returns false,
+// since calling IsNil on it would panic.
+func isReflectNil(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+		// Both interface and slice are nil if first word is 0.
+		// Both are always bigger than a word; assume flagIndir.
+		return v.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 000000000000..0a721e2d8586
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,81 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+// These are sentinel values; compare with errors.Is.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs, maps, and slices are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+	ErrNonPointerArgument          = errors.New("dst must be a pointer")
+)
+
+// visit is a node in a per-hash linked list used by deepMerge/deepMap to
+// track values already being processed. The traversal assumes that any
+// (pointer, type) pair it re-encounters is already handled, which short-
+// circuits cycles in recursive data structures.
+// Visited entries are stored in a map keyed by 17 * address (see the
+// callers); collisions are resolved through the next pointer.
+type visit struct {
+	typ  reflect.Type
+	next *visit
+	ptr  uintptr
+}
+
+// From src/pkg/encoding/json/encode.go.
+// isEmptyValue reports whether v is the empty/zero value for its kind
+// (adapted from src/pkg/encoding/json/encode.go). For interfaces and
+// pointers, shouldDereference controls whether a non-nil value is followed
+// and its pointee checked (true) or treated as non-empty (false).
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		if shouldDereference {
+			return isEmptyValue(v.Elem(), shouldDereference)
+		}
+		return false
+	case reflect.Func:
+		return v.IsNil()
+	case reflect.Invalid:
+		return true
+	}
+	return false
+}
+
+// resolveValues converts the dst and src arguments into reflect.Values.
+// dst must be non-nil and is dereferenced via Elem (callers have already
+// verified it is a pointer); only struct, map, and slice destinations are
+// supported. src is dereferenced once if it is a pointer.
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+	if dst == nil || src == nil {
+		err = ErrNilArguments
+		return
+	}
+	vDst = reflect.ValueOf(dst).Elem()
+	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
+		err = ErrNotSupported
+		return
+	}
+	vSrc = reflect.ValueOf(src)
+	// We check if vSrc is a pointer to dereference it.
+	if vSrc.Kind() == reflect.Ptr {
+		vSrc = vSrc.Elem()
+	}
+	return
+}
diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml
new file mode 100644
index 000000000000..6326d40f0e9a
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+
+arch:
+ - amd64
+ - ppc64le
+
+go:
+ - 1.7.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - 1.17.x
+ - tip
+
+install:
+ - go build .
+
+script:
+ - go test -v