diff --git a/.github/codecov.yml b/.github/codecov.yml
index 072d561668..cacd8f8180 100644
--- a/.github/codecov.yml
+++ b/.github/codecov.yml
@@ -1,42 +1,67 @@
 # Source Network's codecov configuration file.
+
 github_checks:
-  annotations: true
+  annotations: true # This won't work if patch is `false` or has flags.
+
 codecov:
-  require_ci_to_pass: yes
+  require_ci_to_pass: false
+  allow_pseudo_compare: true
+  allow_coverage_offsets: true
+
 coverage:
   precision: 2
-  round: down
-  range: 65...100
+  round: "nearest"
+  range: 60...90
 
   status:
-    # Learn more at https://docs.codecov.io/docs/commit-status
+
     project:
       default:
-        target: 50%
-        threshold: 0.05 # allow this much decrease on project
-        flags:
-          - unit
-
-  # Disable patch as it is not correct and buggy.
-  # Folks over at amazon's aws and mozilla's firefox tv also did the same LOL:
-  #  - https://github.com/aws/amazon-vpc-cni-k8s/pull/1226/files
-  #  - https://github.com/mozilla-mobile/firefox-tv/pull/779/files
+        only_pulls: true # Only post the status if the commits are on a pull request.
+        informational: true # Don't fail the codecov action because of the project's coverage.
+        if_ci_failed: "error" # Give an error if CI fails (e.g. upload to codecov failed).
+        if_not_found: "failure" # Fail if no report for HEAD is found.
+
+    # Note: Patch is needed for github annotations.
     patch:
       default:
-        enabled: no
-        if_not_found: success
+        informational: true # Don't fail the codecov action because of the patch's coverage.
+        if_ci_failed: "error" # Give an error if CI fails (e.g. upload to codecov failed).
+        if_not_found: "failure" # Fail if no report for HEAD is found.
+
+    # Detect indirect coverage changes.
+    changes:
+      default:
+        informational: true # Don't fail the codecov action because of indirect coverage changes.
+        if_ci_failed: "error" # Give an error if CI fails (e.g. upload to codecov failed).
+        if_not_found: "failure" # Fail if no report for HEAD is found.
+
+
+parsers:
+  go:
+    partials_as_hits: false # Don't treat partials as hits.
 
-  changes: false
 comment:
-  layout: "reach, diff, files"
-  behavior: default # update if exists else create new
-  require_changes: true
+  # First the reach graph, then the diff, then the file changes.
+  layout: "newheader, reach, diff, flags, files, footer"
+
+  # Update the old comment with new results.
+  behavior: "default"
+
+  # Post a comment even if there were no changes.
+  require_changes: false
+
+  # Post a comment even if no head or base is found.
+  require_head: false
+  require_base: false
+
 ignore:
   - "tests"
+  - "**/mocks/*"
   - "**/*_test.go"
   - "**/*.pb.go"
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index e277c8c8bc..e48d4303f1 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -8,3 +8,11 @@ updates:
       - "dependencies"
     commit-message:
       prefix: "bot"
+  - package-ecosystem: "npm"
+    directory: "/playground"
+    schedule:
+      interval: "weekly"
+    labels:
+      - "dependencies"
+    commit-message:
+      prefix: "bot"
\ No newline at end of file
diff --git a/.github/workflows/build-ami-with-packer.yml b/.github/workflows/build-ami-with-packer.yml
deleted file mode 100644
index ee5392338b..0000000000
--- a/.github/workflows/build-ami-with-packer.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2023 Democratized Data Foundation
-#
-# Use of this software is governed by the Business Source License
-# included in the file licenses/BSL.txt.
-#
-# As of the Change Date specified in that file, in accordance with
-# the Business Source License, use of this software will be governed
-# by the Apache License, Version 2.0, included in the file
-# licenses/APL.txt.
-
-name: Build AMI With Packer Workflow
-
-on:
-  push:
-    tags: ["v[0-9].[0-9]+.[0-9]+"]
-
-env:
-  PACKER_LOG: 1
-  # RELEASE_VERSION: v0.5.0
-
-jobs:
-  build-ami-with-packer:
-    name: Build ami with packer job
-
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code into the directory
-        uses: actions/checkout@v3
-
-      - name: Environment version target
-        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
-        # run: echo ${{ env.RELEASE_VERSION }}
-
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v1
-        with:
-          aws-access-key-id: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }}
-          aws-region: us-east-1
-
-      - name: Setup `packer`
-        uses: hashicorp/setup-packer@main
-        id: setup
-        with:
-          version: "latest"
-
-      - name: Run `packer init`
-        id: init
-        run: "packer init ./tools/cloud/aws/packer/build_aws_ami.pkr.hcl"
-
-      - name: Run `packer validate`
-        id: validate
-        run: "packer validate -var \"commit=${{ env.RELEASE_VERSION }}\" ./tools/cloud/aws/packer/build_aws_ami.pkr.hcl"
-
-      - name: Run `packer build`
-        id: build
-        run: "packer build -var \"commit=${{ env.RELEASE_VERSION }}\" ./tools/cloud/aws/packer/build_aws_ami.pkr.hcl"
diff --git a/.github/workflows/build-dependencies.yml b/.github/workflows/build-dependencies.yml
index 4eeb238726..112f847192 100644
--- a/.github/workflows/build-dependencies.yml
+++ b/.github/workflows/build-dependencies.yml
@@ -12,10 +12,13 @@ name: Build Dependencies Workflow
 
 on:
   pull_request:
+    branches:
+      - master
+      - develop
 
   push:
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
     branches:
       - master
      - develop
@@ -34,7 +37,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Build all dependencies
diff --git a/.github/workflows/build-then-deploy-ami.yml b/.github/workflows/build-then-deploy-ami.yml
new file mode 100644
index 0000000000..ce6be1e0bd
--- /dev/null
+++ b/.github/workflows/build-then-deploy-ami.yml
@@ -0,0 +1,116 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow builds the AMI using packer; if the build is successful,
+# it will then deploy the AMI onto AWS using terraform apply.
name: Build Then Deploy AMI Workflow
+
+on:
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+
+env:
+  AWS_REGION: 'us-east-1'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }}
+
+  # Logging verbosities (have to be named `PACKER_LOG` and `TF_LOG`).
+  PACKER_LOG: 1
+  TF_LOG: INFO
+
+  # Directories containing config files for the AWS AMI.
+  PACKER_DIR: 'tools/cloud/aws/packer'
+  TF_DIR: 'tools/cloud/aws/terraform'
+
+  # Set environment type for terraform: `dev`, `test`, `prod`
+  ENVIRONMENT_TYPE: "dev"
+
+  # RELEASE_VERSION: v0.5.0
+
+jobs:
+  # This job is responsible for building the AMI using packer.
+  build-ami-with-packer:
+    name: Build ami with packer job
+
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        working-directory: ${{ env.PACKER_DIR }}
+
+    steps:
+      - name: Checkout code into the directory
+        uses: actions/checkout@v3
+
+      - name: Environment version target
+        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> ${GITHUB_ENV}
+        # run: echo ${{ env.RELEASE_VERSION }}
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-region: ${{ env.AWS_REGION }}
+          aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }}
+
+      - name: Setup `packer`
+        uses: hashicorp/setup-packer@main
+        with:
+          version: "latest"
+
+      - name: Run `packer init`
+        run: "packer init build_aws_ami.pkr.hcl"
+
+      - name: Run `packer validate`
+        run: "packer validate -var \"commit=${{ env.RELEASE_VERSION }}\" build_aws_ami.pkr.hcl"
+
+      - name: Run `packer build`
+        run: "packer build -var \"commit=${{ env.RELEASE_VERSION }}\" build_aws_ami.pkr.hcl"
+
+  # This job is responsible for deploying the built AMI onto AWS, using terraform apply.
+  deploy-ami-with-terraform-apply:
+    name: Deploy ami with terraform apply job
+    needs:
+      - build-ami-with-packer
+
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        working-directory: ${{ env.TF_DIR }}
+
+    steps:
+      - name: Checkout code into the directory
+        uses: actions/checkout@v3
+
+      - name: Terraform action setup
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 1.3.7
+
+      - name: Terraform format
+        run: terraform fmt -check
+
+      - name: Terraform initialization
+        run: terraform init -backend-config="workspaces/${ENVIRONMENT_TYPE}-backend.conf"
+
+      - name: Terraform workspace
+        # Select workspace if it exists, otherwise create a new workspace.
+        run: terraform workspace select ${ENVIRONMENT_TYPE} || terraform workspace new ${ENVIRONMENT_TYPE}
+
+      - name: Terraform validation
+        run: terraform validate -no-color
+
+      - name: List workspaces
+        run: ls workspaces
+
+      - name: Terraform Apply
+        run: terraform apply -auto-approve -input=false -var-file="workspaces/source-ec2-${ENVIRONMENT_TYPE}.tfvars"
diff --git a/.github/workflows/check-vulnerabilities.yml b/.github/workflows/check-vulnerabilities.yml
new file mode 100644
index 0000000000..18e5f60de8
--- /dev/null
+++ b/.github/workflows/check-vulnerabilities.yml
@@ -0,0 +1,39 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+name: Check Vulnerabilities Workflow
+
+on:
+  pull_request:
+    branches:
+      - master
+      - develop
+
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+    branches:
+      - master
+      - develop
+
+jobs:
+  check-vulnerabilities:
+    name: Check vulnerabilities job
+
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Run govulncheck
+        uses: golang/govulncheck-action@v1
+        with:
+          go-version-input: "1.20"
+          go-package: ./...
+          check-latest: true
+          cache: true
diff --git a/.github/workflows/code-test-coverage.yml b/.github/workflows/code-test-coverage.yml
index bb1be36296..65c0a92f1f 100644
--- a/.github/workflows/code-test-coverage.yml
+++ b/.github/workflows/code-test-coverage.yml
@@ -12,9 +12,16 @@ name: Code Test Coverage Workflow
 
 on:
   pull_request:
+    branches:
+      - master
+      - develop
 
   push:
-
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+    branches:
+      - master
+      - develop
 
 jobs:
   code-test-coverage:
@@ -25,25 +32,45 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
 
-      - name: Setup Go
+      - name: Setup Go environment explicitly
        uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Generate full test coverage report using go-acc
         run: make test:coverage
 
-      - name: Upload coverage to Codecov
+      - name: Upload coverage to Codecov without token, retry on failure
+        env:
+          codecov_secret: ${{ secrets.CODECOV_TOKEN }}
+        if: env.codecov_secret == ''
+        uses: Wandalen/wretry.action@v1.0.36
+        with:
+          attempt_limit: 5
+          attempt_delay: 10000
+          action: codecov/codecov-action@v3
+          with: |
+            name: defradb-codecov
+            files: ./coverage.txt
+            flags: all-tests
+            os: 'linux'
+            fail_ci_if_error: true
+            verbose: true
+
+      - name: Upload coverage to Codecov with token
+        env:
+          codecov_secret: ${{ secrets.CODECOV_TOKEN }}
+        if: env.codecov_secret != ''
         uses: codecov/codecov-action@v3
         with:
-          fail_ci_if_error: true
+          token: ${{ env.codecov_secret }}
+          name: defradb-codecov
           files: ./coverage.txt
-          flags: defra-tests
-          name: codecov-umbrella
+          flags: all-tests
+          os: 'linux'
+          fail_ci_if_error: true
           verbose: true
           # path_to_write_report: ./coverage/codecov_report.txt
           # directory: ./coverage/reports/
diff --git a/.github/workflows/deploy-ami-with-terraform.yml b/.github/workflows/deploy-ami-with-terraform.yml
deleted file mode 100644
index 3ec9d074ab..0000000000
--- a/.github/workflows/deploy-ami-with-terraform.yml
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2023 Democratized Data Foundation
-#
-# Use of this software is governed by the Business Source License
-# included in the file licenses/BSL.txt.
-#
-# As of the Change Date specified in that file, in accordance with
-# the Business Source License, use of this software will be governed
-# by the Apache License, Version 2.0, included in the file
-# licenses/APL.txt.
-
-name: Deploy AMI With Terraform Workflow
-
-env:
-  # Verbosity setting for Terraform logs
-  TF_LOG: INFO
-
-  # Credentials for deployment to AWS.
-  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }}
-  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }}
-
-  # Set environment type: dev, test, prod
-  ENVIRONMENT: "dev"
-
-on:
-  workflow_run:
-    workflows: ["Build AMI With Packer Workflow"]
-    types:
-      - completed
-
-  pull_request:
-
-jobs:
-  deploy-ami-with-terraform:
-    name: Deploy ami with terraform job
-
-    if: ${{ github.event.workflow_run.conclusion == 'success' }}
-
-    runs-on: ubuntu-latest
-
-    defaults:
-      run:
-        working-directory: tools/cloud/aws/terraform
-
-    steps:
-      - name: Checkout code into the directory
-        uses: actions/checkout@v3
-
-      - name: Terraform action setup
-        uses: hashicorp/setup-terraform@v2
-        with:
-          terraform_version: 1.3.7
-
-      - name: Terraform format
-        id: fmt
-        run: terraform fmt -check
-
-      - name: Terraform initialization
-        id: init
-        run: terraform init -backend-config="workspaces/$ENVIRONMENT-backend.conf"
-
-      - name: Terraform workspace
-        id: wrokspace
-        run: terraform workspace select $ENVIRONMENT || terraform workspace new $ENVIRONMENT #Create workspace if it doesnt exist
-
-      - name: Terraform validate
-        id: validate
-        run: terraform validate -no-color
-
-      - name: Terraform plan
-        id: plan
-        if: github.event_name == 'pull_request'
-        run: terraform plan -no-color -input=false -var-file="workspaces/source-ec2-$ENVIRONMENT.tfvars"
-        continue-on-error: true
-
-      - name: Update pull request
-        uses: actions/github-script@v6
-
-        if: github.event_name == 'pull_request'
-
-        env:
-          PLAN: "terraform\n${{ steps.plan.outputs.stdout }}"
-
-        with:
-          github-token: ${{ secrets.ONLY_DEFRADB_REPO_CI_PAT }} # Must have pull request write perms.
-          script: |
-            const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`
-            #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\`
-            #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\`
-            #### Terraform Plan 📖\`${{ steps.plan.outcome }}\`
-            <details><summary>Show Plan</summary>
-            \`\`\`\n
-            ${process.env.PLAN}
-            \`\`\`
-            </details>
-            *Pushed by: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`;
-            github.rest.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              body: output
-            })
-
-      - name: Terraform plan status
-        if: steps.plan.outcome == 'failure'
-        run: exit 1
-
-      - name: List workspaces
-        run: ls workspaces
-
-      - name: Terraform Apply # Only runs if pushed
-        if: github.event_name != 'pull_request'
-        run: terraform apply -auto-approve -input=false -var-file="workspaces/source-ec2-$ENVIRONMENT.tfvars"
diff --git a/.github/workflows/detect-change.yml b/.github/workflows/detect-change.yml
index 65238e78da..b6272c21cd 100644
--- a/.github/workflows/detect-change.yml
+++ b/.github/workflows/detect-change.yml
@@ -12,10 +12,13 @@ name: Detect Change Workflow
 
 on:
   pull_request:
+    branches:
+      - master
+      - develop
 
   push:
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
     branches:
       - master
       - develop
@@ -33,7 +36,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Build dependencies
diff --git a/.github/workflows/lint-then-benchmark.yml b/.github/workflows/lint-then-benchmark.yml
index 9c1bdf42eb..015c8725c2 100644
--- a/.github/workflows/lint-then-benchmark.yml
+++ b/.github/workflows/lint-then-benchmark.yml
@@ -15,7 +15,7 @@ on:
   push:
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
     branches:
       - master
       - develop
@@ -57,7 +57,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Run the golangci-lint
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index e36f2ed49b..df2af79dd0 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -36,7 +36,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Check linting through golangci-lint
@@ -46,7 +46,7 @@ jobs:
           # Required: the version of golangci-lint is required.
           # Note: The version should not pick the patch version as the latest patch
           # version is what will always be used.
-          version: v1.51
+          version: v1.53
 
           # Optional: working directory, useful for monorepos or if we wanted to run this
           # on a non-root directory.
diff --git a/.github/workflows/preview-ami-with-terraform-plan.yml b/.github/workflows/preview-ami-with-terraform-plan.yml
new file mode 100644
index 0000000000..ed2fef6f0c
--- /dev/null
+++ b/.github/workflows/preview-ami-with-terraform-plan.yml
@@ -0,0 +1,135 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+name: Preview AMI With Terraform Plan Workflow
+
+on:
+  pull_request:
+    branches:
+      - master
+      - develop
+    paths:
+      - '.github/workflows/preview-ami-with-terraform-plan.yml'
+      - '.github/workflows/build-then-deploy-ami.yml'
+      - 'tools/cloud/aws/**'
+
+
+env:
+  # Verbosity setting for terraform logs (has to be named `TF_LOG`).
+  TF_LOG: INFO
+
+  # Directory containing terraform config files.
+  TF_DIR: 'tools/cloud/aws/terraform'
+
+  # Set environment type: dev, test, prod
+  ENVIRONMENT_TYPE: "dev"
+
+  # Even though we don't see these being used directly, terraform needs these set.
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }}
+
+
+jobs:
+  preview-ami-with-terraform-plan:
+    name: Preview ami with terraform plan job
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        working-directory: ${{ env.TF_DIR }}
+
+    steps:
+      - name: Stop and notify the use of unprivileged flow or missing tokens
+        if: env.AWS_ACCESS_KEY_ID == '' || env.AWS_SECRET_ACCESS_KEY == ''
+        # Note: Fail this step, as we don't want unprivileged access doing these changes.
+        uses: actions/github-script@v6
+        with:
+          script: |
+            let unprivileged_warning =
+              'Warning: you made changes to files that require privileged access, this means' +
+              ' you are either using the fork-flow, or are missing some secrets.\n' +
+              'Solution: please use branch-flow, or add the missing secrets. If you are not' +
+              ' an internal developer, please reach out to a maintainer for assistance.\n' +
+              'Note: the files that were changed also require manual testing' +
+              ' using our organization AWS account, and using manual triggers on' +
+              ' some of our workflows (that are not triggered normally).\n' +
+              'Pushed by: @${{ github.actor }}, SHA: \`${{ github.event.pull_request.head.sha }}\`\n';
+            core.setFailed(unprivileged_warning)
+
+      - name: Checkout code into the directory
+        uses: actions/checkout@v3
+
+      - name: Terraform action setup
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 1.3.7
+
+      - name: Terraform format
+        id: terraform-format
+        run: terraform fmt -check
+
+      - name: Terraform initialization
+        id: terraform-initialization
+        run: terraform init -backend-config="workspaces/${ENVIRONMENT_TYPE}-backend.conf"
+
+      - name: Terraform workspace
+        # Select workspace if it exists, otherwise create a new workspace.
+        run: terraform workspace select ${ENVIRONMENT_TYPE} || terraform workspace new ${ENVIRONMENT_TYPE}
+
+      - name: Terraform validation
+        id: terraform-validation
+        run: terraform validate -no-color
+
+      - name: Terraform plan
+        id: terraform-plan
+        run: terraform plan -no-color -input=false -var-file="workspaces/source-ec2-${ENVIRONMENT_TYPE}.tfvars"
+        continue-on-error: true
+
+      - name: Comment results on pull request
+        uses: actions/github-script@v6
+        env:
+          TERRAFORM_PLAN_OUTPUT: "Terraform Plan Output:\n${{ steps.terraform-plan.outputs.stdout }}\n"
+
+        with:
+          github-token: ${{ secrets.ONLY_DEFRADB_REPO_CI_PAT }} # Must have pull request write perms.
+          script: |
+            const terraform_plan_output = `
+            #### Terraform Format and Style \`${{ steps.terraform-format.outcome }}\`
+            #### Terraform Initialization \`${{ steps.terraform-initialization.outcome }}\`
+            #### Terraform Validation \`${{ steps.terraform-validation.outcome }}\`
+            #### Terraform Plan \`${{ steps.terraform-plan.outcome }}\`
+
+            <details>
+            <summary>
+            Show Plan
+            </summary>
+            \`\`\`\n
+            ${process.env.TERRAFORM_PLAN_OUTPUT}
+            \`\`\`\n
+
+            </details>
+
+            ***Pushed By: @${{ github.actor }}***
+            ***SHA: \`${{ github.event.pull_request.head.sha }}\`***
+            `;
+
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: terraform_plan_output
+            })
+
+      - name: Terraform plan failure
+        if: steps.terraform-plan.outcome == 'failure'
+        run: exit 1
+
+      - name: List workspaces
+        run: ls workspaces
diff --git a/.github/workflows/pull-docker-image.yml b/.github/workflows/pull-docker-image.yml
new file mode 100644
index 0000000000..eb0170b7ef
--- /dev/null
+++ b/.github/workflows/pull-docker-image.yml
@@ -0,0 +1,51 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow validates that the images pushed to the container
+# registries can be pulled and then run successfully.
+name: Pull Docker Image Workflow
+
+on:
+  workflow_run:
+    # Warning: this workflow must NOT:
+    #  - interact with any new code.
+    #  - checkout new code.
+    #  - build/compile anything (only pull).
+    #  - make any indirect calls (e.g. make xyz, or npm install, etc.)
+    # Note this workflow:
+    #  - will use the base's (or default) workflow file's state.
+    #  - doesn't run on the PR or the incoming branch; it runs on the default branch.
+    #  - has a read-write repo token.
+    #  - has access to secrets.
+    workflows: ["Push Docker Image To Registries Workflow"]
+    types:
+      - completed
+
+jobs:
+  pull-docker-image:
+    name: Pull docker image job
+
+    if: ${{ github.event.workflow_run.conclusion == 'success' }}
+
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        image_tag:
+          - sourcenetwork/defradb:latest
+          - ghcr.io/sourcenetwork/defradb:latest
+
+    steps:
+      - name: Pull Docker image
+        run: docker pull ${{ matrix.image_tag }}
+
+      - name: Test Docker image
+        run: docker run --rm ${{ matrix.image_tag }}
diff --git a/.github/workflows/push-docker-image-to-registries.yml b/.github/workflows/push-docker-image-to-registries.yml
new file mode 100644
index 0000000000..d7d00d14aa
--- /dev/null
+++ b/.github/workflows/push-docker-image-to-registries.yml
@@ -0,0 +1,83 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow builds a Docker container image; if the build is successful,
+# it will then deploy the image to the DockerHub & GitHub container registries.
+name: Push Docker Image To Registries Workflow
+
+on:
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+
+env:
+  TEST_TAG: sourcenetwork/defradb:test
+
+jobs:
+  push-docker-image-to-registries:
+    name: Push Docker image to registries job
+
+    runs-on: ubuntu-latest
+
+    permissions:
+      packages: write
+      contents: read
+
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Build Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: tools/defradb.containerfile
+          load: true
+          tags: ${{ env.TEST_TAG }}
+          labels: ${{ steps.meta.outputs.labels }}
+
+      - name: Test Docker image
+        run: docker run --rm ${{ env.TEST_TAG }}
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: |
+            sourcenetwork/defradb
+            ghcr.io/${{ github.repository }}
+
+      - name: Push Docker images
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: tools/defradb.containerfile
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 1bab3e5a05..bfa696a283 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -12,6 +12,9 @@ name: Run Tests Workflow
 
 on:
   pull_request:
+    branches:
+      - master
+      - develop
 
   push:
 
@@ -28,7 +31,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Build dependencies
diff --git a/.github/workflows/start-binary.yml b/.github/workflows/start-binary.yml
index 97db056df9..267466b8a3 100644
--- a/.github/workflows/start-binary.yml
+++ b/.github/workflows/start-binary.yml
@@ -12,10 +12,13 @@ name: Start Binary Workflow
 
 on:
   pull_request:
+    branches:
+      - master
+      - develop
 
   push:
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
     branches:
       - master
       - develop
@@ -34,7 +37,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true
 
       - name: Build modules
diff --git a/.github/workflows/validate-containerfile.yml b/.github/workflows/validate-containerfile.yml
new file mode 100644
index 0000000000..b3315861ad
--- /dev/null
+++ b/.github/workflows/validate-containerfile.yml
@@ -0,0 +1,57 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow tests that the container build is successful and
+# that the built container runs successfully.
+name: Validate Containerfile Workflow
+
+on:
+  pull_request:
+    branches:
+      - master
+      - develop
+
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+    branches:
+      - master
+      - develop
+
+env:
+  TEST_TAG: sourcenetwork/defradb:test
+
+jobs:
+  validate-containerfile:
+    name: Validate containerfile job
+
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Build Docker image
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: tools/defradb.containerfile
+          load: true
+          tags: ${{ env.TEST_TAG }}
+
+      - name: Test Docker image
+        run: docker run --rm ${{ env.TEST_TAG }}
+
diff --git a/.gitignore b/.gitignore
index 826d4d912b..b19a6d9259 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,16 @@ coverage.txt
 tests/bench/*.log
 tests/bench/*.svg
 
+tests/lenses/rust_wasm32_set_default/Cargo.lock
+tests/lenses/rust_wasm32_set_default/target
+tests/lenses/rust_wasm32_set_default/pkg
+tests/lenses/rust_wasm32_remove/Cargo.lock
+tests/lenses/rust_wasm32_remove/target
+tests/lenses/rust_wasm32_remove/pkg
+tests/lenses/rust_wasm32_copy/Cargo.lock
+tests/lenses/rust_wasm32_copy/target
+tests/lenses/rust_wasm32_copy/pkg
+
 # Ignore OS X metadata files.
 .history
 **.DS_Store
diff --git a/CHANGELOG.md b/CHANGELOG.md
index da957eb28f..3638d3ef75 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,88 @@
+<a name="v0.6.0"></a>
+## [v0.6.0](https://github.com/sourcenetwork/defradb/compare/v0.5.1...v0.6.0)
+
+> 2023-07-31
+
+DefraDB v0.6 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+There are several new and powerful features, important bug fixes, and notable refactors in this release. Some highlight features include: the initial release of our LensVM-based schema migration engine powered by WebAssembly ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)), the newly embedded DefraDB Playground, which includes a bundled GraphQL client and schema manager, and last but not least, a relation field (_id) alias to improve the developer experience ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)).
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a breaking change to existing v0.5.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/.
+
+### Features
+
+* Add `_not` operator ([#1631](https://github.com/sourcenetwork/defradb/issues/1631))
+* Schema list API ([#1625](https://github.com/sourcenetwork/defradb/issues/1625))
+* Add simple data import and export ([#1630](https://github.com/sourcenetwork/defradb/issues/1630))
+* Playground ([#1575](https://github.com/sourcenetwork/defradb/issues/1575))
+* Add schema migration get and set cmds to CLI ([#1650](https://github.com/sourcenetwork/defradb/issues/1650))
+* Allow relation alias on create and update ([#1609](https://github.com/sourcenetwork/defradb/issues/1609))
+* Make fetcher calculate docFetches and fieldFetches ([#1713](https://github.com/sourcenetwork/defradb/issues/1713))
+* Add lens migration engine to defra ([#1564](https://github.com/sourcenetwork/defradb/issues/1564))
+* Add `_keys` attribute to `selectNode` simple explain ([#1546](https://github.com/sourcenetwork/defradb/issues/1546))
+* CLI commands for secondary indexes ([#1595](https://github.com/sourcenetwork/defradb/issues/1595))
+* Add alias to `groupBy` related object ([#1579](https://github.com/sourcenetwork/defradb/issues/1579))
+* Non-unique secondary index (no querying) ([#1450](https://github.com/sourcenetwork/defradb/issues/1450))
+* Add ability to explain-debug all nodes ([#1563](https://github.com/sourcenetwork/defradb/issues/1563))
+* Include dockey in doc exists err ([#1558](https://github.com/sourcenetwork/defradb/issues/1558))
+
+### Fixes
+
+* Better wait in CLI integration test ([#1415](https://github.com/sourcenetwork/defradb/issues/1415))
+* Return error when relation is not defined on both types ([#1647](https://github.com/sourcenetwork/defradb/issues/1647))
+* Change `core.DocumentMapping` to pointer ([#1528](https://github.com/sourcenetwork/defradb/issues/1528))
+* Fix invalid (badger) datastore state ([#1685](https://github.com/sourcenetwork/defradb/issues/1685))
+* Discard index and subscription implicit transactions ([#1715](https://github.com/sourcenetwork/defradb/issues/1715))
+* Remove duplicated `peers` in peerstore prefix ([#1678](https://github.com/sourcenetwork/defradb/issues/1678))
+* Return errors from typeJoinOne ([#1716](https://github.com/sourcenetwork/defradb/issues/1716))
+* Document change detector breaking change ([#1531](https://github.com/sourcenetwork/defradb/issues/1531))
+* Standardise `schema migration` CLI errors ([#1682](https://github.com/sourcenetwork/defradb/issues/1682))
+* Introspection OrderArg returns null inputFields ([#1633](https://github.com/sourcenetwork/defradb/issues/1633))
+* Avoid duplicated requestable fields ([#1621](https://github.com/sourcenetwork/defradb/issues/1621))
+* Normalize int field kind ([#1619](https://github.com/sourcenetwork/defradb/issues/1619))
+* Change the WriteSyncer to use lock when piping ([#1608](https://github.com/sourcenetwork/defradb/issues/1608))
+* Filter splitting and rendering for related types ([#1541](https://github.com/sourcenetwork/defradb/issues/1541))
+
+### Documentation
+
+* Improve CLI command documentation ([#1505](https://github.com/sourcenetwork/defradb/issues/1505))
+
+### Refactoring
+
+* Schema list output to include schemaVersionID ([#1706](https://github.com/sourcenetwork/defradb/issues/1706))
+* Reuse lens wasm modules ([#1641](https://github.com/sourcenetwork/defradb/issues/1641))
+* Remove redundant txn param from fetcher start ([#1635](https://github.com/sourcenetwork/defradb/issues/1635))
+* Remove first CRDT byte from field encoded values ([#1622](https://github.com/sourcenetwork/defradb/issues/1622))
+* Merge `node` into `net` and improve coverage ([#1593](https://github.com/sourcenetwork/defradb/issues/1593))
+* Fetcher filter and field optimization ([#1500](https://github.com/sourcenetwork/defradb/issues/1500))
+
+### Testing
+
+* Rework transaction test framework capabilities ([#1603](https://github.com/sourcenetwork/defradb/issues/1603))
+* Expand backup integration tests ([#1699](https://github.com/sourcenetwork/defradb/issues/1699))
+* Disable test ([#1675](https://github.com/sourcenetwork/defradb/issues/1675))
+* Add tests for 1-1 group by id ([#1655](https://github.com/sourcenetwork/defradb/issues/1655))
+* Remove CLI tests from make test ([#1643](https://github.com/sourcenetwork/defradb/issues/1643))
+* Bundle test state into single var ([#1645](https://github.com/sourcenetwork/defradb/issues/1645))
+* Convert explain group tests to new explain setup ([#1537](https://github.com/sourcenetwork/defradb/issues/1537))
+* Add tests for foo_id field name clashes ([#1521](https://github.com/sourcenetwork/defradb/issues/1521))
+* Resume wait correctly following test node restart ([#1515](https://github.com/sourcenetwork/defradb/issues/1515))
+* Require no errors when none expected ([#1509](https://github.com/sourcenetwork/defradb/issues/1509))
+
+### Continuous integration
+
+* Add workflows to push, pull, and validate docker images ([#1676](https://github.com/sourcenetwork/defradb/issues/1676))
+* Build mocks using make ([#1612](https://github.com/sourcenetwork/defradb/issues/1612))
+* Fix terraform plan and merge AMI build + deploy workflow ([#1514](https://github.com/sourcenetwork/defradb/issues/1514))
+* Reconfigure CodeCov action to ensure stability ([#1414](https://github.com/sourcenetwork/defradb/issues/1414))
+
+### Chore
+
+* Bump to GoLang v1.20 ([#1689](https://github.com/sourcenetwork/defradb/issues/1689))
+* Update to ipfs boxo 0.10.0 ([#1573](https://github.com/sourcenetwork/defradb/issues/1573))
+
+
+<a name="v0.5.1"></a>
 ## [v0.5.1](https://github.com/sourcenetwork/defradb/compare/v0.5.0...v0.5.1)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fedad36f7e..c7cfb9b590 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,6 +55,13 @@ Run the following commands for testing:
 - `make bench` to run the benchmark suite. To compare a branch's results with the `develop` branch results, execute the suite on both branches, output the results to files, and compare them with a tool like `benchstat` (e.g., `benchstat develop.txt current.txt`). To install `benchstat`, use `make deps:bench`.
 - `make test:changes` to run a test suite detecting breaking changes. Accompany breaking changes with documentation in `docs/data_format_changes/` for the test to pass.
 
+### Test prerequisites
+
+The following tools are required in order to build and run the tests within this repository:
+
+- [Go](https://go.dev/doc/install)
+- Cargo/rustc, typically installed via [rustup](https://www.rust-lang.org/tools/install)
+
 ## Documentation
 
 The overall project documentation can be found at [docs.source.network](https://docs.source.network), and its source at [github.com/sourcenetwork/docs.source.network](https://github.com/sourcenetwork/docs.source.network).
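For contributors getting set up, the following is a minimal sketch of how these new prerequisites come together, derived from the `deps:lens` and `test:lens` targets added to the Makefile below; the rustup installer command is the standard upstream one and is an assumption here, not something this patch adds:

```shell
# Install the Rust toolchain (provides cargo and rustc).
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# Add the wasm target that the test lens modules are compiled against
# (this is also what `make deps:lens` runs).
rustup target add wasm32-unknown-unknown

# Build the wasm lens modules and run the schema migration test suite.
make deps:lens
make test:lens
```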
diff --git a/Makefile b/Makefile
index 78ba5ceced..0e79f59646 100644
--- a/Makefile
+++ b/Makefile
@@ -25,7 +25,16 @@ BUILD_FLAGS=-trimpath -ldflags "\
 -X 'github.com/sourcenetwork/defradb/version.GitCommitDate=$(VERSION_GITCOMMITDATE)'"
 endif
 
-TEST_FLAGS=-race -shuffle=on -timeout 60s
+ifdef BUILD_TAGS
+BUILD_FLAGS+=-tags $(BUILD_TAGS)
+endif
+
+TEST_FLAGS=-race -shuffle=on -timeout 150s
+
+PLAYGROUND_DIRECTORY=playground
+LENS_TEST_DIRECTORY=tests/integration/schema/migrations
+CLI_TEST_DIRECTORY=tests/integration/cli
+DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY) -e $(CLI_TEST_DIRECTORY))
 
 default:
 	@go run $(BUILD_FLAGS) cmd/defradb/main.go
@@ -67,15 +76,21 @@ client\:add-schema:
 
 .PHONY: deps\:lint
 deps\:lint:
-	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51
+	go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53
 
 .PHONY: deps\:test
 deps\:test:
 	go install gotest.tools/gotestsum@latest
 
+.PHONY: deps\:lens
+deps\:lens:
+	rustup target add wasm32-unknown-unknown
+	@$(MAKE) -C ./tests/lenses build
+
 .PHONY: deps\:coverage
 deps\:coverage:
 	go install github.com/ory/go-acc@latest
+	@$(MAKE) deps:lens
 
 .PHONY: deps\:bench
 deps\:bench:
@@ -89,6 +104,14 @@ deps\:chglog:
 deps\:modules:
 	go mod download
 
+.PHONY: deps\:mock
+deps\:mock:
+	go install github.com/vektra/mockery/v2@v2.30.1
+
+.PHONY: deps\:playground
+deps\:playground:
+	cd $(PLAYGROUND_DIRECTORY) && npm install && npm run build
+
 .PHONY: deps
 deps:
 	@$(MAKE) deps:modules && \
@@ -96,7 +119,22 @@ deps:
 	$(MAKE) deps:chglog && \
 	$(MAKE) deps:coverage && \
 	$(MAKE) deps:lint && \
-	$(MAKE) deps:test
+	$(MAKE) deps:test && \
+	$(MAKE) deps:mock
+
+.PHONY: mock
+mock:
+	@$(MAKE) deps:mock
+	mockery --dir ./client --output ./client/mocks --name DB --with-expecter
+	mockery --dir ./client --output ./client/mocks --name Collection --with-expecter
+	mockery --dir ./datastore --output ./datastore/mocks --name DAGStore --with-expecter
+	mockery --dir ./datastore --output ./datastore/mocks --name DSReaderWriter --with-expecter
+	mockery --srcpkg github.com/ipfs/go-datastore/query --output ./datastore/mocks --name Results --with-expecter
+	mockery --dir ./datastore --output ./datastore/mocks --name RootStore --with-expecter
+	mockery --dir ./datastore --output ./datastore/mocks --name Txn --with-expecter
+	mockery --dir ./datastore --output ./datastore/mocks --name DAGStore --with-expecter
+	mockery --dir ./db/fetcher --output ./db/fetcher/mocks --name Fetcher --with-expecter
+	mockery --dir ./db/fetcher --output ./db/fetcher/mocks --name EncodedDocument --with-expecter
 
 .PHONY: dev\:start
 dev\:start:
@@ -120,7 +158,7 @@ verify:
 
 .PHONY: tidy
 tidy:
-	go mod tidy -go=1.19
+	go mod tidy -go=1.20
 
 .PHONY: clean
 clean:
@@ -145,32 +183,42 @@ endif
 
 .PHONY: test
 test:
-	gotestsum --format pkgname -- ./... $(TEST_FLAGS)
+	gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS)
+
+.PHONY: test\:quick
+test\:quick:
+	gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES)
 
 # Only build the tests (don't execute them).
 .PHONY: test\:build
 test\:build:
-	gotestsum --format pkgname -- ./... $(TEST_FLAGS) -run=nope
+	gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) -run=nope
 
 .PHONY: test\:ci
 test\:ci:
-	DEFRA_BADGER_MEMORY=true DEFRA_BADGER_FILE=true $(MAKE) test:names
+	DEFRA_BADGER_MEMORY=true DEFRA_BADGER_FILE=true $(MAKE) test:all
 
 .PHONY: test\:go
 test\:go:
-	go test ./... $(TEST_FLAGS)
+	go test $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS)
 
 .PHONY: test\:names
 test\:names:
-	gotestsum --format testname -- ./... $(TEST_FLAGS)
+	gotestsum --format testname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS)
+
+.PHONY: test\:all
+test\:all:
+	@$(MAKE) test:names
+	@$(MAKE) test:lens
+	@$(MAKE) test:cli
 
 .PHONY: test\:verbose
 test\:verbose:
-	gotestsum --format standard-verbose -- ./... $(TEST_FLAGS)
+	gotestsum --format standard-verbose -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS)
 
 .PHONY: test\:watch
 test\:watch:
-	gotestsum --watch -- ./...
+	gotestsum --watch -- $(DEFAULT_TEST_DIRECTORIES)
 
 .PHONY: test\:clean
@@ -188,6 +236,16 @@ test\:bench-short:
 test\:scripts:
 	@$(MAKE) -C ./tools/scripts/ test
 
+.PHONY: test\:lens
+test\:lens:
+	@$(MAKE) deps:lens
+	gotestsum --format testname -- ./$(LENS_TEST_DIRECTORY)/... $(TEST_FLAGS)
+
+.PHONY: test\:cli
+test\:cli:
+	@$(MAKE) deps:lens
+	gotestsum --format testname -- ./$(CLI_TEST_DIRECTORY)/... $(TEST_FLAGS)
+
 # Using go-acc to ensure integration tests are included.
 # Usage: `make test:coverage` or `make test:coverage path="{pathToPackage}"`
 # Example: `make test:coverage path="./api/..."`
@@ -195,10 +253,10 @@ test\:scripts:
 test\:coverage:
 	@$(MAKE) deps:coverage
 ifeq ($(path),)
-	go-acc ./... --output=coverage.txt --covermode=atomic -- -coverpkg=./...
+	go-acc ./... --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=./...
 	@echo "Show coverage information for each function in ./..."
 else
-	go-acc $(path) --output=coverage.txt --covermode=atomic -- -coverpkg=$(path)
+	go-acc $(path) --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=$(path)
 	@echo "Show coverage information for each function in" path=$(path)
 endif
 	go tool cover -func coverage.txt | grep total | awk '{print $$3}'
@@ -214,6 +272,7 @@ test\:coverage-html:
 
 .PHONY: test\:changes
 test\:changes:
+	@$(MAKE) deps:lens
 	env DEFRA_DETECT_DATABASE_CHANGES=true gotestsum -- ./... -shuffle=on -p 1
 
 .PHONY: validate\:codecov
diff --git a/README.md b/README.md
index f1f1925c83..8428ebc77f 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,8 @@ Read the documentation on [docs.source.network](https://docs.source.network/).
 - [Collection subscription example](#collection-subscription-example)
 - [Replicator example](#replicator-example)
 - [Securing the HTTP API with TLS](#securing-the-http-api-with-tls)
+- [Supporting CORS](#supporting-cors)
+- [Backing up and restoring](#backing-up-and-restoring)
 - [Licensing](#licensing)
 - [Contributors](#contributors)
 
@@ -272,7 +274,7 @@ About the flags:
 
 - `--rootdir` specifies the root dir (config and data) to use
 - `--url` is the address to listen on for the client HTTP and GraphQL API
-- `--p2paddr` is the multiaddress for the p2p networking to listen on
+- `--p2paddr` is the multiaddress for the P2P networking to listen on
 - `--tcpaddr` is the multiaddress for the gRPC server to listen on
 - `--peers` is a comma-separated list of peer multiaddresses
 
@@ -387,6 +389,25 @@ defradb start --allowed-origins=http://localhost:3000
 
 The catch-all `*` is also a valid origin.
 
+## Backing up and restoring
+
+ +To backup the data, run the following command: +```shell +defradb client backup export path/to/backup.json +``` + +To pretty print the JSON content when exporting, run the following command: +```shell +defradb client backup export --pretty path/to/backup.json +``` + +To restore the data, run the following command: +```shell +defradb client backup import path/to/backup.json +``` + ## Community Discuss on [Discord](https://discord.source.network/) or [Github Discussions](https://github.com/sourcenetwork/defradb/discussions). The Source project is on [Twitter](https://twitter.com/sourcenetwrk). diff --git a/api/http/errors.go b/api/http/errors.go index 91a235543d..4acf9abd25 100644 --- a/api/http/errors.go +++ b/api/http/errors.go @@ -36,6 +36,8 @@ var ( ErrPeerIdUnavailable = errors.New("no PeerID available. P2P might be disabled") ErrStreamingUnsupported = errors.New("streaming unsupported") ErrNoEmail = errors.New("email address must be specified for tls with autocert") + ErrPayloadFormat = errors.New("invalid payload format") + ErrMissingNewKey = errors.New("missing _newKey for imported doc") ) // ErrorResponse is the GQL top level object holding error items for the response payload. diff --git a/api/http/handler.go b/api/http/handler.go index 6ab802df12..aa7b828f29 100644 --- a/api/http/handler.go +++ b/api/http/handler.go @@ -18,6 +18,7 @@ import ( "net/http" "github.com/go-chi/chi/v5" + "github.com/go-chi/cors" "github.com/pkg/errors" "github.com/sourcenetwork/defradb/client" @@ -68,25 +69,38 @@ func simpleDataResponse(args ...any) DataResponse { // newHandler returns a handler with the router instantiated. func newHandler(db client.DB, opts serverOptions) *handler { + mux := chi.NewRouter() + mux.Use(loggerMiddleware) + + if len(opts.allowedOrigins) != 0 { + mux.Use(cors.Handler(cors.Options{ + AllowedOrigins: opts.allowedOrigins, + AllowedMethods: []string{"GET", "POST", "PATCH", "OPTIONS"}, + AllowedHeaders: []string{"Content-Type"}, + MaxAge: 300, + })) + } + + mux.Use(func(next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if opts.tls.HasValue() { + rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") + } + ctx := context.WithValue(req.Context(), ctxDB{}, db) + if opts.peerID != "" { + ctx = context.WithValue(ctx, ctxPeerID{}, opts.peerID) + } + next.ServeHTTP(rw, req.WithContext(ctx)) + }) + }) + return setRoutes(&handler{ + Mux: mux, db: db, options: opts, }) } -func (h *handler) handle(f http.HandlerFunc) http.HandlerFunc { - return func(rw http.ResponseWriter, req *http.Request) { - if h.options.tls.HasValue() { - rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") - } - ctx := context.WithValue(req.Context(), ctxDB{}, h.db) - if h.options.peerID != "" { - ctx = context.WithValue(ctx, ctxPeerID{}, h.options.peerID) - } - f(rw, req.WithContext(ctx)) - } -} - func getJSON(req *http.Request, v any) error { err := json.NewDecoder(req.Body).Decode(v) if err != nil { diff --git a/api/http/handlerfuncs.go b/api/http/handlerfuncs.go index d6e90af778..9e5b212fe3 100644 --- a/api/http/handlerfuncs.go +++ b/api/http/handlerfuncs.go @@ -24,9 +24,10 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/multiformats/go-multihash" - "github.com/pkg/errors" + "github.com/sourcenetwork/defradb/client" corecrdt "github.com/sourcenetwork/defradb/core/crdt" + "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" ) 
@@ -52,7 +53,7 @@ func pingHandler(rw http.ResponseWriter, req *http.Request) {
 	sendJSON(
 		req.Context(),
 		rw,
-		simpleDataResponse("response", "pong", "test"),
+		simpleDataResponse("response", "pong"),
 		http.StatusOK,
 	)
 }
@@ -155,9 +156,65 @@ func execGQLHandler(rw http.ResponseWriter, req *http.Request) {
 	sendJSON(req.Context(), rw, newGQLResult(result.GQL), http.StatusOK)
 }
 
+type fieldResponse struct {
+	ID       string `json:"id"`
+	Name     string `json:"name"`
+	Kind     string `json:"kind"`
+	Internal bool   `json:"internal"`
+}
+
 type collectionResponse struct {
-	Name string `json:"name"`
-	ID   string `json:"id"`
+	Name      string          `json:"name"`
+	ID        string          `json:"id"`
+	VersionID string          `json:"version_id"`
+	Fields    []fieldResponse `json:"fields,omitempty"`
+}
+
+func listSchemaHandler(rw http.ResponseWriter, req *http.Request) {
+	db, err := dbFromContext(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	cols, err := db.GetAllCollections(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	colResp := make([]collectionResponse, len(cols))
+	for i, col := range cols {
+		var fields []fieldResponse
+		for _, field := range col.Schema().Fields {
+			fieldRes := fieldResponse{
+				ID:       field.ID.String(),
+				Name:     field.Name,
+				Internal: field.IsInternal(),
+			}
+			if field.IsObjectArray() {
+				fieldRes.Kind = fmt.Sprintf("[%s]", field.Schema)
+			} else if field.IsObject() {
+				fieldRes.Kind = field.Schema
+			} else {
+				fieldRes.Kind = field.Kind.String()
+			}
+			fields = append(fields, fieldRes)
+		}
+		colResp[i] = collectionResponse{
+			Name:      col.Name(),
+			ID:        col.SchemaID(),
+			VersionID: col.Schema().VersionID,
+			Fields:    fields,
+		}
+	}
+
+	sendJSON(
+		req.Context(),
+		rw,
+		simpleDataResponse("collections", colResp),
+		http.StatusOK,
+	)
 }
 
 func loadSchemaHandler(rw http.ResponseWriter, req *http.Request) {
@@ -187,8 +244,9 @@ func loadSchemaHandler(rw http.ResponseWriter, req *http.Request) {
 			return
 		}
 		colResp[i] = collectionResponse{
-			Name: col.Name(),
-			ID:   col.SchemaID(),
+			Name:      col.Name(),
+			ID:        col.SchemaID(),
+			VersionID: col.Schema().VersionID,
 		}
 	}
 
@@ -227,6 +285,69 @@ func patchSchemaHandler(rw http.ResponseWriter, req *http.Request) {
 	)
 }
 
+func setMigrationHandler(rw http.ResponseWriter, req *http.Request) {
+	cfgStr, err := readWithLimit(req.Body, rw)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	db, err := dbFromContext(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	txn, err := db.NewTxn(req.Context(), false)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	var cfg client.LensConfig
+	err = json.Unmarshal(cfgStr, &cfg)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	err = db.LensRegistry().SetMigration(req.Context(), txn, cfg)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	err = txn.Commit(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	sendJSON(
+		req.Context(),
+		rw,
+		simpleDataResponse("result", "success"),
+		http.StatusOK,
+	)
+}
+
+func getMigrationHandler(rw http.ResponseWriter, req *http.Request) {
+	db, err := dbFromContext(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	cfgs := db.LensRegistry().Config()
+
+	sendJSON(
+		req.Context(),
+		rw,
+		simpleDataResponse("configuration", cfgs),
+		http.StatusOK,
+	)
+}
+
 func getBlockHandler(rw http.ResponseWriter, req *http.Request) {
 	cidStr := chi.URLParam(req, "cid")
diff --git a/api/http/handlerfuncs_backup.go b/api/http/handlerfuncs_backup.go
new file mode 100644
index 0000000000..3961263995
--- /dev/null
+++ b/api/http/handlerfuncs_backup.go
@@ -0,0 +1,123 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package http
+
+import (
+	"context"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/errors"
+)
+
+func exportHandler(rw http.ResponseWriter, req *http.Request) {
+	db, err := dbFromContext(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	cfg := &client.BackupConfig{}
+	err = getJSON(req, cfg)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusBadRequest)
+		return
+	}
+
+	err = validateBackupConfig(req.Context(), cfg, db)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusBadRequest)
+		return
+	}
+
+	err = db.BasicExport(req.Context(), cfg)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	sendJSON(
+		req.Context(),
+		rw,
+		simpleDataResponse("result", "success"),
+		http.StatusOK,
+	)
+}
+
+func importHandler(rw http.ResponseWriter, req *http.Request) {
+	db, err := dbFromContext(req.Context())
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	cfg := &client.BackupConfig{}
+	err = getJSON(req, cfg)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusBadRequest)
+		return
+	}
+
+	err = validateBackupConfig(req.Context(), cfg, db)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusBadRequest)
+		return
+	}
+
+	err = db.BasicImport(req.Context(), cfg.Filepath)
+	if err != nil {
+		handleErr(req.Context(), rw, err, http.StatusInternalServerError)
+		return
+	}
+
+	sendJSON(
+		req.Context(),
+		rw,
+		simpleDataResponse("result", "success"),
+		http.StatusOK,
+	)
+}
+
+func validateBackupConfig(ctx context.Context, cfg *client.BackupConfig, db client.DB) error {
+	if !isValidPath(cfg.Filepath) {
+		return errors.New("invalid file path")
+	}
+
+	if cfg.Format != "" && strings.ToLower(cfg.Format) != "json" {
+		return errors.New("only JSON format is supported at the moment")
+	}
+
+	for _, colName := range cfg.Collections {
+		_, err := db.GetCollectionByName(ctx, colName)
+		if err != nil {
+			return errors.Wrap("collection does not exist", err)
+		}
+	}
+	return nil
+}
+
+func isValidPath(filepath string) bool {
+	// If the file exists, the path is valid.
+	if _, err := os.Stat(filepath); err == nil {
+		return true
+	}
+
+	// If not, attempt to write to the path; if that succeeds,
+	// remove the file and report the path as valid.
+	var d []byte
+	if err := os.WriteFile(filepath, d, 0o644); err == nil {
+		_ = os.Remove(filepath)
+		return true
+	}
+
+	return false
+}
diff --git a/api/http/handlerfuncs_backup_test.go b/api/http/handlerfuncs_backup_test.go
new file mode 100644
index 0000000000..67af6015a1
--- /dev/null
+++ b/api/http/handlerfuncs_backup_test.go
@@ -0,0 +1,623 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package http
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"net/http"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/client/mocks"
+	"github.com/sourcenetwork/defradb/errors"
+)
+
+func TestExportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) {
+	t.Cleanup(CleanupEnv)
+	env = "dev"
+	errResponse := ErrorResponse{}
+	testRequest(testOptions{
+		Testing:        t,
+		DB:             nil,
+		Method:         "POST",
+		Path:           ExportPath,
+		Body:           nil,
+		ExpectedStatus: 500,
+		ResponseData:   &errResponse,
+	})
+	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available")
+	require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status)
+	require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError)
+	require.Equal(t, "no database available", errResponse.Errors[0].Message)
+}
+
+func TestExportHandler_WithWrongPayload_ReturnError(t *testing.T) {
+	t.Cleanup(CleanupEnv)
+	env = "dev"
+	ctx := context.Background()
+	defra := testNewInMemoryDB(t, ctx)
+	defer defra.Close(ctx)
+
+	buf := bytes.NewBuffer([]byte("[]"))
+	errResponse := ErrorResponse{}
+	testRequest(testOptions{
+		Testing:        t,
+		DB:             defra,
+		Method:         "POST",
+		Path:           ExportPath,
+		Body:           buf,
+		ExpectedStatus: 400,
+		ResponseData:   &errResponse,
+	})
+	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "json: cannot unmarshal array into Go value of type client.BackupConfig")
+	require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status)
+	require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError)
+	require.Equal(t, "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", errResponse.Errors[0].Message)
+}
+
+func TestExportHandler_WithInvalidFilePath_ReturnError(t *testing.T) {
+	t.Cleanup(CleanupEnv)
+	env = "dev"
+	ctx := context.Background()
+	defra := testNewInMemoryDB(t, ctx)
+	defer defra.Close(ctx)
+
+	filepath := t.TempDir() + "/some/test.json"
+	cfg := client.BackupConfig{
+		Filepath: filepath,
+	}
+	b, err := json.Marshal(cfg)
+	require.NoError(t, err)
+	buf := bytes.NewBuffer(b)
+
+	errResponse := ErrorResponse{}
+	testRequest(testOptions{
+		Testing:        t,
+		DB:             defra,
+		Method:         "POST",
+		Path:           ExportPath,
+		Body:           buf,
+		ExpectedStatus: 400,
+		ResponseData:   &errResponse,
+	})
+	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path")
+	require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status)
+	require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError)
+	require.Equal(t, "invalid file path", errResponse.Errors[0].Message)
+}
+
+func TestExportHandler_WithInvalidFormat_ReturnError(t *testing.T) {
+	t.Cleanup(CleanupEnv)
+	env = "dev"
+	ctx := context.Background()
+	defra := testNewInMemoryDB(t, ctx)
+	defer defra.Close(ctx)
defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Format: "csv", + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "only JSON format is supported at the moment") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "only JSON format is supported at the moment", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithInvalidCollection_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Format: "json", + Collections: []string{"invalid"}, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "collection does not exist: datastore: key not found") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "collection does not exist: datastore: key not found", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithBasicExportError_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + db := mocks.NewDB(t) + testError := errors.New("test error") + db.EXPECT().BasicExport(mock.Anything, mock.Anything).Return(testError) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: db, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "test error") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "test error", errResponse.Errors[0].Message) +} + +func TestExportHandler_AllCollections_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + respBody := testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 200, + 
}) + + b, err = os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"data":{"result":"success"}}`, + string(respBody), + ) + + require.Equal( + t, + `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, + string(b), + ) +} + +func TestExportHandler_UserCollection_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Collections: []string{"User"}, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + respBody := testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 200, + }) + + b, err = os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"data":{"result":"success"}}`, + string(respBody), + ) + + require.Equal( + t, + `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, + string(b), + ) +} + +func TestExportHandler_UserCollectionWithModifiedDoc_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + err = doc.Set("points", 1000) + require.NoError(t, err) + + err = col.Update(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Collections: []string{"User"}, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + respBody := testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 200, + }) + + b, err = os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"data":{"result":"success"}}`, + string(respBody), + ) + + require.Equal( + t, + `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-36697142-d46a-57b1-b25e-6336706854ea","age":31,"name":"Bob","points":1000,"verified":true}]}`, + string(b), + ) +} + +func TestImportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: nil, + Method: "POST", + Path: ImportPath, + Body: nil, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) +} + +func 
TestImportHandler_WithWrongPayloadFormat_UnmarshalError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + buf := bytes.NewBuffer([]byte(`[]`)) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains( + t, + errResponse.Errors[0].Extensions.Stack, + "json: cannot unmarshal array into Go value of type client.BackupConfig", + ) + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal( + t, + "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", + errResponse.Errors[0].Message, + ) +} + +func TestImportHandler_WithInvalidFilepath_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/some/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "invalid file path", errResponse.Errors[0].Message) +} + +func TestImportHandler_WithDBClosed_DatastoreClosedError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "datastore closed") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "datastore closed", errResponse.Errors[0].Message) +} + +func TestImportHandler_WithUnknownCollection_KeyNotFoundError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), + 0644, + ) + require.NoError(t, err) + + cfg := client.BackupConfig{ + Filepath: filepath, + } + + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, 
errResponse.Errors[0].Extensions.Stack, "failed to get collection: datastore: key not found. Name: User") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "failed to get collection: datastore: key not found. Name: User", errResponse.Errors[0].Message) +} + +func TestImportHandler_UserCollection_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + filepath := t.TempDir() + "/test.json" + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), + 0644, + ) + require.NoError(t, err) + + cfg := client.BackupConfig{ + Filepath: filepath, + } + + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + resp := DataResponse{} + _ = testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 200, + ResponseData: &resp, + }) + + switch v := resp.Data.(type) { + case map[string]any: + require.Equal(t, "success", v["result"]) + default: + t.Fatalf("data should be of type map[string]any but got %T", resp.Data) + } + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + importedDoc, err := col.Get(ctx, doc.Key(), false) + require.NoError(t, err) + + require.Equal(t, doc.Key().String(), importedDoc.Key().String()) +} + +func TestImportHandler_WithExistingDoc_DocumentExistError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + err = os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), + 0644, + ) + require.NoError(t, err) + + cfg := client.BackupConfig{ + Filepath: filepath, + } + + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + _ = testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + QueryParams: map[string]string{"collections": "User"}, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + + require.Contains( + t, + errResponse.Errors[0].Extensions.Stack, + "failed to save a new doc to collection: a document with the given dockey already exists", + ) + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal( + t, + "failed to save a new doc to collection: a document with the given dockey already exists. 
DocKey: bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab", + errResponse.Errors[0].Message, + ) +} diff --git a/api/http/handlerfuncs_index.go b/api/http/handlerfuncs_index.go new file mode 100644 index 0000000000..e8d10d900e --- /dev/null +++ b/api/http/handlerfuncs_index.go @@ -0,0 +1,144 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "net/http" + "strings" + + "github.com/sourcenetwork/defradb/client" +) + +func createIndexHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + var data map[string]string + err = getJSON(req, &data) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + colNameArg := data["collection"] + fieldsArg := data["fields"] + indexNameArg := data["name"] + + col, err := db.GetCollectionByName(req.Context(), colNameArg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + fields := strings.Split(fieldsArg, ",") + fieldDescriptions := make([]client.IndexedFieldDescription, 0, len(fields)) + for _, field := range fields { + fieldDescriptions = append(fieldDescriptions, client.IndexedFieldDescription{Name: field}) + } + indexDesc := client.IndexDescription{ + Name: indexNameArg, + Fields: fieldDescriptions, + } + indexDesc, err = col.CreateIndex(req.Context(), indexDesc) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("index", indexDesc), + http.StatusOK, + ) +} + +func dropIndexHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + var data map[string]string + err = getJSON(req, &data) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + colNameArg := data["collection"] + indexNameArg := data["name"] + + col, err := db.GetCollectionByName(req.Context(), colNameArg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + err = col.DropIndex(req.Context(), indexNameArg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("result", "success"), + http.StatusOK, + ) +} + +func listIndexHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + queryParams := req.URL.Query() + collectionParam := queryParams.Get("collection") + + if collectionParam == "" { + indexesPerCol, err := db.GetAllIndexes(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + sendJSON( + req.Context(), + rw, + simpleDataResponse("collections", indexesPerCol), + http.StatusOK, + ) + } else { + col, err := db.GetCollectionByName(req.Context(), collectionParam) + if err != nil { + 
handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + indexes, err := col.GetIndexes(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + sendJSON( + req.Context(), + rw, + simpleDataResponse("indexes", indexes), + http.StatusOK, + ) + } +} diff --git a/api/http/handlerfuncs_index_test.go b/api/http/handlerfuncs_index_test.go new file mode 100644 index 0000000000..3e82249ef8 --- /dev/null +++ b/api/http/handlerfuncs_index_test.go @@ -0,0 +1,239 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/mocks" + "github.com/sourcenetwork/defradb/errors" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func addDBToContext(t *testing.T, req *http.Request, db *mocks.DB) *http.Request { + if db == nil { + db = mocks.NewDB(t) + } + ctx := context.WithValue(req.Context(), ctxDB{}, db) + return req.WithContext(ctx) +} + +func TestCreateIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { + handler := http.HandlerFunc(createIndexHandler) + assert.HTTPBodyContains(t, handler, "POST", IndexPath, nil, "no database available") +} + +func TestCreateIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { + req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte("invalid map"))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, nil) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(createIndexHandler) + + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") +} + +func TestCreateIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) + + req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(createIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestCreateIndexHandler_IfFailsToCreateIndex_ReturnError(t *testing.T) { + testError := errors.New("test error") + col := mocks.NewCollection(t) + col.EXPECT().CreateIndex(mock.Anything, mock.Anything). 
+ Return(client.IndexDescription{}, testError) + + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) + + req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(createIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestDropIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { + handler := http.HandlerFunc(dropIndexHandler) + assert.HTTPBodyContains(t, handler, "DELETE", IndexPath, nil, "no database available") +} + +func TestDropIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { + req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte("invalid map"))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, nil) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(dropIndexHandler) + + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") +} + +func TestDropIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) + + req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(dropIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestDropIndexHandler_IfFailsToDropIndex_ReturnError(t *testing.T) { + testError := errors.New("test error") + col := mocks.NewCollection(t) + col.EXPECT().DropIndex(mock.Anything, mock.Anything).Return(testError) + + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) + + req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(dropIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestListIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { + handler := http.HandlerFunc(listIndexHandler) + assert.HTTPBodyContains(t, handler, "GET", IndexPath, nil, "no database available") +} + +func TestListIndexHandler_IfFailsToGetAllIndexes_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetAllIndexes(mock.Anything).Return(nil, testError) + + req, err := http.NewRequest("GET", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(listIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, 
rr.Body.String(), testError.Error()) +} + +func TestListIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) + + u, _ := url.Parse("http://defradb.com" + IndexPath) + params := url.Values{} + params.Add("collection", "testCollection") + u.RawQuery = params.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(listIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestListIndexHandler_IfFailsToCollectionGetIndexes_ReturnError(t *testing.T) { + testError := errors.New("test error") + col := mocks.NewCollection(t) + col.EXPECT().GetIndexes(mock.Anything).Return(nil, testError) + + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) + + u, _ := url.Parse("http://defradb.com" + IndexPath) + params := url.Values{} + params.Add("collection", "testCollection") + u.RawQuery = params.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(listIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} diff --git a/api/http/handlerfuncs_test.go b/api/http/handlerfuncs_test.go index b21526efc0..bb7bb71aad 100644 --- a/api/http/handlerfuncs_test.go +++ b/api/http/handlerfuncs_test.go @@ -27,6 +27,7 @@ import ( "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" @@ -42,6 +43,7 @@ type testOptions struct { Path string Body io.Reader Headers map[string]string + QueryParams map[string]string ExpectedStatus int ResponseData any ServerOptions serverOptions @@ -69,7 +71,7 @@ func TestRootHandler(t *testing.T) { }) switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database. Read the documentation at https://docs.source.network/.", v["response"]) + require.Equal(t, "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database. 
Read the documentation at https://docs.source.network/.", v["response"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -89,7 +91,7 @@ func TestPingHandler(t *testing.T) { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "pong", v["response"]) + require.Equal(t, "pong", v["response"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -113,7 +115,7 @@ func TestDumpHandlerWithNoError(t *testing.T) { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "ok", v["response"]) + require.Equal(t, "ok", v["response"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -132,10 +134,10 @@ func TestDumpHandlerWithDBError(t *testing.T) { ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestExecGQLWithNilBody(t *testing.T) { @@ -152,10 +154,10 @@ func TestExecGQLWithNilBody(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "body cannot be empty") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "body cannot be empty", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "body cannot be empty") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "body cannot be empty", errResponse.Errors[0].Message) } func TestExecGQLWithEmptyBody(t *testing.T) { @@ -172,10 +174,10 @@ func TestExecGQLWithEmptyBody(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "missing GraphQL request") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "missing GraphQL request", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "missing GraphQL request") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "missing GraphQL request", errResponse.Errors[0].Message) } type mockReadCloser struct { @@ -205,10 +207,10 @@ func TestExecGQLWithMockBody(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "error reading", errResponse.Errors[0].Message) + 
require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "error reading", errResponse.Errors[0].Message) } func TestExecGQLWithInvalidContentType(t *testing.T) { @@ -217,7 +219,7 @@ func TestExecGQLWithInvalidContentType(t *testing.T) { errResponse := ErrorResponse{} stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -233,10 +235,10 @@ mutation { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "mime: invalid media parameter") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "mime: invalid media parameter", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "mime: invalid media parameter") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "mime: invalid media parameter", errResponse.Errors[0].Message) } func TestExecGQLWithNoDB(t *testing.T) { @@ -245,7 +247,7 @@ func TestExecGQLWithNoDB(t *testing.T) { errResponse := ErrorResponse{} stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -260,10 +262,10 @@ mutation { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { @@ -273,7 +275,7 @@ func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { stmt := ` [ "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"verified\\\": true, @@ -297,10 +299,10 @@ func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid character") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "unmarshal error: invalid character ':' after array element", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid character") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", 
errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "unmarshal error: invalid character ':' after array element", errResponse.Errors[0].Message) } func TestExecGQLHandlerContentTypeJSON(t *testing.T) { @@ -315,7 +317,7 @@ func TestExecGQLHandlerContentTypeJSON(t *testing.T) { stmt := ` { "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"verified\\\": true, @@ -344,7 +346,7 @@ func TestExecGQLHandlerContentTypeJSON(t *testing.T) { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { @@ -359,7 +361,7 @@ func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { stmt := ` { "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"notAField\\\": true @@ -384,8 +386,8 @@ func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { ResponseData: &resp, }) - assert.Contains(t, resp.Errors, "The given field does not exist. Name: notAField") - assert.Len(t, resp.Errors, 1) + require.Contains(t, resp.Errors, "The given field does not exist. Name: notAField") + require.Len(t, resp.Errors, 1) } func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { @@ -400,7 +402,7 @@ func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { stmt := ` { "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"verified\\\": true, @@ -429,7 +431,7 @@ func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerContentTypeFormURLEncoded(t *testing.T) { @@ -447,10 +449,10 @@ func TestExecGQLHandlerContentTypeFormURLEncoded(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "content type application/x-www-form-urlencoded not yet supported") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "content type application/x-www-form-urlencoded not yet supported", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "content type application/x-www-form-urlencoded not yet supported") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "content type application/x-www-form-urlencoded not yet supported", errResponse.Errors[0].Message) } func TestExecGQLHandlerContentTypeGraphQL(t *testing.T) { @@ -464,7 +466,7 @@ func TestExecGQLHandlerContentTypeGraphQL(t *testing.T) { // add document stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -485,7 +487,7 @@ mutation { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerContentTypeText(t *testing.T) { @@ -499,7 +501,7 @@ func TestExecGQLHandlerContentTypeText(t *testing.T) { // add document stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -519,7 
+521,7 @@ mutation { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerWithSubsctiption(t *testing.T) { @@ -532,7 +534,7 @@ func TestExecGQLHandlerWithSubsctiption(t *testing.T) { stmt := ` subscription { - user { + User { _key age name @@ -565,7 +567,7 @@ subscription { // add document stmt2 := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -586,12 +588,144 @@ mutation { }) select { case data := <-ch: - assert.Contains(t, string(data), users[0].Key) + require.Contains(t, string(data), users[0].Key) case err := <-errCh: t.Fatal(err) } } +func TestListSchemaHandlerWithoutDB(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: nil, + Method: "GET", + Path: SchemaPath, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + + assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + assert.Equal(t, "no database available", errResponse.Errors[0].Message) +} + +func TestListSchemaHandlerWithNoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + stmt := ` +type user { + name: String + age: Int + verified: Boolean + points: Float +} +type group { + owner: user + members: [user] +}` + + _, err := defra.AddSchema(ctx, stmt) + if err != nil { + t.Fatal(err) + } + + resp := DataResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "GET", + Path: SchemaPath, + ExpectedStatus: 200, + ResponseData: &resp, + }) + + switch v := resp.Data.(type) { + case map[string]any: + assert.Equal(t, map[string]any{ + "collections": []any{ + map[string]any{ + "name": "group", + "id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", + "version_id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", + "fields": []any{ + map[string]any{ + "id": "0", + "kind": "ID", + "name": "_key", + "internal": true, + }, + map[string]any{ + "id": "1", + "kind": "[user]", + "name": "members", + "internal": false, + }, + map[string]any{ + "id": "2", + "kind": "user", + "name": "owner", + "internal": false, + }, + map[string]any{ + "id": "3", + "kind": "ID", + "name": "owner_id", + "internal": true, + }, + }, + }, + map[string]any{ + "name": "user", + "id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "version_id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "fields": []any{ + map[string]any{ + "id": "0", + "kind": "ID", + "name": "_key", + "internal": true, + }, + map[string]any{ + "id": "1", + "kind": "Int", + "name": "age", + "internal": false, + }, + map[string]any{ + "id": "2", + "kind": "String", + "name": "name", + "internal": false, + }, + map[string]any{ + "id": "3", + "kind": "Float", + "name": "points", + "internal": false, + }, + map[string]any{ + "id": "4", + "kind": "Boolean", + "name": "verified", + "internal": false, + }, + }, + }, + }, + }, v) + + default: + t.Fatalf("data should be of type map[string]any but got %T\n%v", resp.Data, v) + } +} + func TestLoadSchemaHandlerWithReadBodyError(t *testing.T) { t.Cleanup(CleanupEnv) env = "dev"
@@ -604,23 +738,23 @@ func TestLoadSchemaHandlerWithReadBodyError(t *testing.T) { Testing: t, DB: nil, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: &mockReadCloser, ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "error reading", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "error reading", errResponse.Errors[0].Message) } func TestLoadSchemaHandlerWithoutDB(t *testing.T) { t.Cleanup(CleanupEnv) env = "dev" stmt := ` -type user { +type User { name: String age: Int verified: Boolean @@ -634,16 +768,16 @@ type user { Testing: t, DB: nil, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: buf, ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestLoadSchemaHandlerWithAddSchemaError(t *testing.T) { @@ -655,7 +789,7 @@ func TestLoadSchemaHandlerWithAddSchemaError(t *testing.T) { // statement with types instead of type stmt := ` -types user { +types User { name: String age: Int verified: Boolean @@ -669,18 +803,18 @@ types user { Testing: t, DB: defra, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: buf, ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "Syntax Error GraphQL (2:1) Unexpected Name") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal( + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "Syntax Error GraphQL (2:1) Unexpected Name") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal( t, - "Syntax Error GraphQL (2:1) Unexpected Name \"types\"\n\n1: \n2: types user {\n ^\n3: \\u0009name: String\n", + "Syntax Error GraphQL (2:1) Unexpected Name \"types\"\n\n1: \n2: types User {\n ^\n3: \\u0009name: String\n", errResponse.Errors[0].Message, ) } @@ -691,7 +825,7 @@ func TestLoadSchemaHandlerWitNoError(t *testing.T) { defer defra.Close(ctx) stmt := ` -type user { +type User { name: String age: Int verified: Boolean @@ -705,7 +839,7 @@ type user { Testing: t, DB: defra, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: buf, ExpectedStatus: 200, ResponseData: 
&resp, @@ -713,12 +847,13 @@ type user { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, map[string]any{ + require.Equal(t, map[string]any{ "result": "success", "collections": []any{ map[string]any{ - "name": "user", - "id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "name": "User", + "id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", + "version_id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", }, }, }, v) @@ -742,10 +877,10 @@ func TestGetBlockHandlerWithMultihashError(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "illegal base32 data at input byte 0") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "illegal base32 data at input byte 0", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "illegal base32 data at input byte 0") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "illegal base32 data at input byte 0", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithDSKeyWithNoDB(t *testing.T) { @@ -768,10 +903,10 @@ func TestGetBlockHandlerWithDSKeyWithNoDB(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithNoDB(t *testing.T) { @@ -788,10 +923,10 @@ func TestGetBlockHandlerWithNoDB(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithGetBlockstoreError(t *testing.T) { @@ -812,10 +947,10 @@ func TestGetBlockHandlerWithGetBlockstoreError(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "ipld: could not find 
bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithValidBlockstore(t *testing.T) { @@ -828,7 +963,7 @@ func TestGetBlockHandlerWithValidBlockstore(t *testing.T) { // add document stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -856,7 +991,7 @@ mutation { // get document cid stmt2 := ` query { - user (dockey: "%s") { + User (dockey: "%s") { _version { cid } @@ -898,7 +1033,7 @@ query { case map[string]any: switch val := d["val"].(type) { case string: - assert.Equal(t, "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", val) + require.Equal(t, "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", val) default: t.Fatalf("expecting string but got %T", val) } @@ -924,7 +1059,7 @@ func TestPeerIDHandler(t *testing.T) { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", v["peerID"]) + require.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", v["peerID"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -945,13 +1080,13 @@ func TestPeerIDHandlerWithNoPeerIDInContext(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no PeerID available. P2P might be disabled") - assert.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Not Found", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no PeerID available. P2P might be disabled", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no PeerID available. P2P might be disabled") + require.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Not Found", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no PeerID available. 
P2P might be disabled", errResponse.Errors[0].Message) } -func testRequest(opt testOptions) { +func testRequest(opt testOptions) []byte { req, err := http.NewRequest(opt.Method, opt.Path, opt.Body) if err != nil { opt.Testing.Fatal(err) @@ -961,20 +1096,30 @@ func testRequest(opt testOptions) { req.Header.Set(k, v) } + q := req.URL.Query() + for k, v := range opt.QueryParams { + q.Add(k, v) + } + req.URL.RawQuery = q.Encode() + h := newHandler(opt.DB, opt.ServerOptions) rec := httptest.NewRecorder() h.ServeHTTP(rec, req) assert.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) - respBody, err := io.ReadAll(rec.Result().Body) + resBody, err := io.ReadAll(rec.Result().Body) if err != nil { opt.Testing.Fatal(err) } - err = json.Unmarshal(respBody, &opt.ResponseData) - if err != nil { - opt.Testing.Fatal(err) + if opt.ResponseData != nil { + err = json.Unmarshal(resBody, &opt.ResponseData) + if err != nil { + opt.Testing.Fatal(err) + } } + + return resBody } func testSubscriptionRequest(ctx context.Context, opt testOptions, ch chan []byte, errCh chan error) { @@ -993,7 +1138,7 @@ func testSubscriptionRequest(ctx context.Context, opt testOptions, ch chan []byt h := newHandler(opt.DB, opt.ServerOptions) rec := httptest.NewRecorder() h.ServeHTTP(rec, req) - assert.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) + require.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) respBody, err := io.ReadAll(rec.Result().Body) if err != nil { @@ -1026,7 +1171,7 @@ func testNewInMemoryDB(t *testing.T, ctx context.Context) client.DB { func testLoadSchema(t *testing.T, ctx context.Context, db client.DB) { stmt := ` -type user { +type User { name: String age: Int verified: Boolean diff --git a/api/http/logger_test.go b/api/http/logger_test.go index 75367eba29..9c2791d9df 100644 --- a/api/http/logger_test.go +++ b/api/http/logger_test.go @@ -79,12 +79,11 @@ func TestLoggerKeyValueOutput(t *testing.T) { rec2 := httptest.NewRecorder() - h := newHandler(nil, serverOptions{}) log.ApplyConfig(logging.Config{ EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), OutputPaths: []string{logFile}, }) - loggerMiddleware(h.handle(pingHandler)).ServeHTTP(rec2, req) + loggerMiddleware(http.HandlerFunc(pingHandler)).ServeHTTP(rec2, req) assert.Equal(t, 200, rec2.Result().StatusCode) // inspect the log file diff --git a/api/http/playground.go b/api/http/playground.go new file mode 100644 index 0000000000..0a69e312b2 --- /dev/null +++ b/api/http/playground.go @@ -0,0 +1,28 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
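+
+// This file is compiled only when the `playground` build tag is set,
+// e.g. `go build -tags playground ./...`; without the tag, the
+// playgroundHandler variable declared in router.go keeps its nil zero value.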
+ +//go:build playground + +package http + +import ( + "io/fs" + "net/http" + + "github.com/sourcenetwork/defradb/playground" +) + +func init() { + sub, err := fs.Sub(playground.Dist, "dist") + if err != nil { + panic(err) + } + playgroundHandler = http.FileServer(http.FS(sub)) +} diff --git a/api/http/router.go b/api/http/router.go index ad1111ad71..2d54a16560 100644 --- a/api/http/router.go +++ b/api/http/router.go @@ -11,12 +11,11 @@ package http import ( + "net/http" "net/url" "path" "strings" - "github.com/go-chi/chi/v5" - "github.com/go-chi/cors" "github.com/pkg/errors" ) @@ -25,42 +24,42 @@ const ( Version string = "v0" versionedAPIPath string = "/api/" + Version - RootPath string = versionedAPIPath + "" - PingPath string = versionedAPIPath + "/ping" - DumpPath string = versionedAPIPath + "/debug/dump" - BlocksPath string = versionedAPIPath + "/blocks" - GraphQLPath string = versionedAPIPath + "/graphql" - SchemaLoadPath string = versionedAPIPath + "/schema/load" - SchemaPatchPath string = versionedAPIPath + "/schema/patch" - PeerIDPath string = versionedAPIPath + "/peerid" + RootPath string = versionedAPIPath + "" + PingPath string = versionedAPIPath + "/ping" + DumpPath string = versionedAPIPath + "/debug/dump" + BlocksPath string = versionedAPIPath + "/blocks" + GraphQLPath string = versionedAPIPath + "/graphql" + SchemaPath string = versionedAPIPath + "/schema" + SchemaMigrationPath string = SchemaPath + "/migration" + IndexPath string = versionedAPIPath + "/index" + PeerIDPath string = versionedAPIPath + "/peerid" + BackupPath string = versionedAPIPath + "/backup" + ExportPath string = BackupPath + "/export" + ImportPath string = BackupPath + "/import" ) -func setRoutes(h *handler) *handler { - h.Mux = chi.NewRouter() - - // setup CORS - if len(h.options.allowedOrigins) != 0 { - h.Use(cors.Handler(cors.Options{ - AllowedOrigins: h.options.allowedOrigins, - AllowedMethods: []string{"GET", "POST", "OPTIONS"}, - AllowedHeaders: []string{"Content-Type"}, - MaxAge: 300, - })) - } +// playgroundHandler is set when building with the playground build tag +var playgroundHandler http.Handler - // setup logger middleware - h.Use(loggerMiddleware) - - // define routes - h.Get(RootPath, h.handle(rootHandler)) - h.Get(PingPath, h.handle(pingHandler)) - h.Get(DumpPath, h.handle(dumpHandler)) - h.Get(BlocksPath+"/{cid}", h.handle(getBlockHandler)) - h.Get(GraphQLPath, h.handle(execGQLHandler)) - h.Post(GraphQLPath, h.handle(execGQLHandler)) - h.Post(SchemaLoadPath, h.handle(loadSchemaHandler)) - h.Post(SchemaPatchPath, h.handle(patchSchemaHandler)) - h.Get(PeerIDPath, h.handle(peerIDHandler)) +func setRoutes(h *handler) *handler { + h.Get(RootPath, rootHandler) + h.Get(PingPath, pingHandler) + h.Get(DumpPath, dumpHandler) + h.Get(BlocksPath+"/{cid}", getBlockHandler) + h.Get(GraphQLPath, execGQLHandler) + h.Post(GraphQLPath, execGQLHandler) + h.Get(SchemaPath, listSchemaHandler) + h.Post(SchemaPath, loadSchemaHandler) + h.Patch(SchemaPath, patchSchemaHandler) + h.Post(SchemaMigrationPath, setMigrationHandler) + h.Get(SchemaMigrationPath, getMigrationHandler) + h.Post(IndexPath, createIndexHandler) + h.Delete(IndexPath, dropIndexHandler) + h.Get(IndexPath, listIndexHandler) + h.Get(PeerIDPath, peerIDHandler) + h.Post(ExportPath, exportHandler) + h.Post(ImportPath, importHandler) + h.Handle("/*", playgroundHandler) return h } diff --git a/cli/backup.go b/cli/backup.go new file mode 100644 index 0000000000..15877fa7fb --- /dev/null +++ b/cli/backup.go @@ -0,0 +1,25 @@ +// Copyright 2022 
Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeBackupCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "backup", + Short: "Interact with the backup utility", + Long: `Export to or import from a backup file. +Currently only supports JSON format.`, + } + return cmd +} diff --git a/cli/backup_export.go b/cli/backup_export.go new file mode 100644 index 0000000000..32184bfe35 --- /dev/null +++ b/cli/backup_export.go @@ -0,0 +1,142 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + "strings" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +const jsonFileType = "json" + +func MakeBackupExportCommand(cfg *config.Config) *cobra.Command { + var collections []string + var pretty bool + var format string + var cmd = &cobra.Command{ + Use: "export [-c --collections | -p --pretty | -f --format] <output_path>", + Short: "Export the database to a file", + Long: `Export the database to a file. If a file exists at the location, it will be overwritten. + +If the --collections flag is provided, only the data for those collections will be exported. +Otherwise, all collections in the database will be exported. + +If the --pretty flag is provided, the JSON will be pretty printed.
+ +Example: export data for the 'Users' collection: + defradb client backup export --collections Users user_data.json`, + Args: func(cmd *cobra.Command, args []string) error { + if err := cobra.ExactArgs(1)(cmd, args); err != nil { + return NewErrInvalidArgumentLength(err, 1) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if !isValidExportFormat(format) { + return ErrInvalidExportFormat + } + outputPath := args[0] + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ExportPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + for i := range collections { + collections[i] = strings.Trim(collections[i], " ") + } + + data := client.BackupConfig{ + Filepath: outputPath, + Format: format, + Pretty: pretty, + Collections: collections, + } + + b, err := json.Marshal(data) + if err != nil { + return err + } + + res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + type exportResponse struct { + Errors []struct { + Message string `json:"message"` + } `json:"errors"` + } + r := exportResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to export data", + logging.NewKV("Errors", r.Errors)) + } else if len(collections) == 1 { + log.FeedbackInfo(cmd.Context(), "Data exported for collection "+collections[0]) + } else if len(collections) > 1 { + log.FeedbackInfo(cmd.Context(), "Data exported for collections "+strings.Join(collections, ", ")) + } else { + log.FeedbackInfo(cmd.Context(), "Data exported for all collections") + } + } + return nil + }, + } + cmd.Flags().BoolVarP(&pretty, "pretty", "p", false, "Set the output JSON to be pretty printed") + cmd.Flags().StringVarP(&format, "format", "f", jsonFileType, + "Define the output format. Supported formats: [json]") + cmd.Flags().StringSliceVarP(&collections, "collections", "c", []string{}, "List of collections") + + return cmd +} + +func isValidExportFormat(format string) bool { + switch strings.ToLower(format) { + case jsonFileType: + return true + default: + return false + } +} diff --git a/cli/backup_export_test.go b/cli/backup_export_test.go new file mode 100644 index 0000000000..9539a1cdb1 --- /dev/null +++ b/cli/backup_export_test.go @@ -0,0 +1,300 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
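For context on the command above: everything MakeBackupExportCommand does over the wire reduces to one JSON POST against the ExportPath route (/api/v0/backup/export, per the router constants earlier in this diff). A standalone sketch of that call, assuming a node listening on the default API address localhost:9181 and payload keys matching client.BackupConfig's JSON tags (both assumptions, not confirmed by this diff):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Payload mirroring the client.BackupConfig the CLI marshals (key names assumed).
	body, err := json.Marshal(map[string]any{
		"filepath":    "/tmp/backup.json",
		"format":      "json",
		"pretty":      true,
		"collections": []string{"User"},
	})
	if err != nil {
		panic(err)
	}
	res, err := http.Post("http://localhost:9181/api/v0/backup/export", "application/json", bytes.NewBuffer(body))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	out, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(out)) // {"data":...} on success, {"errors":[...]} otherwise
}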
+ +package cli + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" +) + +func TestBackupExportCmd_WithNoArgument_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + + dbExportCmd := MakeBackupExportCommand(cfg) + err := dbExportCmd.ValidateArgs([]string{}) + require.ErrorIs(t, err, ErrInvalidArgumentLength) +} + +func TestBackupExportCmd_WithInvalidExportFormat_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + dbExportCmd := MakeBackupExportCommand(cfg) + + filepath := t.TempDir() + "/test.json" + + dbExportCmd.Flags().Set("format", "invalid") + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.ErrorIs(t, err, ErrInvalidExportFormat) +} + +func TestBackupExportCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + + filepath := t.TempDir() + "/test.json" + + dbExportCmd := MakeBackupExportCommand(cfg) + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestBackupExportCmd_WithEmptyDatastore_NoError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Len(t, b, 2) // file should be an empty json object +} + +func TestBackupExportCmd_WithInvalidCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("collections", "User") + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Failed to export data")) +} + +func TestBackupExportCmd_WithAllCollection_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + string(b), + ) +} + +func 
TestBackupExportCmd_WithAllCollectionAndPrettyFormating_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("pretty", "true") + err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{ + "User": [ + { + "_key": "bae-e933420a-988a-56f8-8952-6c245aebd519", + "_newKey": "bae-e933420a-988a-56f8-8952-6c245aebd519", + "age": 30, + "name": "John" + } + ] +}`, + string(b), + ) +} + +func TestBackupExportCmd_WithSingleCollection_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("collections", "User") + err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for collection User")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + string(b), + ) +} + +func TestBackupExportCmd_WithMultipleCollections_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col1, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := di.db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc2) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("collections", "User, Address") + err = dbExportCmd.RunE(dbExportCmd, 
[]string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for collections User, Address")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + + require.EqualValues(t, expectedMap, fileMap) +} diff --git a/cli/backup_import.go b/cli/backup_import.go new file mode 100644 index 0000000000..6802230aa0 --- /dev/null +++ b/cli/backup_import.go @@ -0,0 +1,98 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +func MakeBackupImportCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "import <input_path>", + Short: "Import a JSON data file to the database", + Long: `Import a JSON data file to the database.
+ +Example: import data to the database: + defradb client backup import user_data.json`, + Args: func(cmd *cobra.Command, args []string) error { + if err := cobra.ExactArgs(1)(cmd, args); err != nil { + return NewErrInvalidArgumentLength(err, 1) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) (err error) { + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ImportPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + inputPath := args[0] + data := map[string]string{ + "filepath": inputPath, + } + + b, err := json.Marshal(data) + if err != nil { + return err + } + + res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexCreateResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to import data", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully imported data from file", + logging.NewKV("File", inputPath)) + } + } + return nil + }, + } + return cmd +} diff --git a/cli/backup_import_test.go b/cli/backup_import_test.go new file mode 100644 index 0000000000..ce84c5c2c6 --- /dev/null +++ b/cli/backup_import_test.go @@ -0,0 +1,130 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
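Both backup commands above branch on isFileInfoPipe(stdout): raw JSON is printed when output is piped, and log.FeedbackInfo/FeedbackError is used on a terminal. The helper itself is defined elsewhere in the cli package and is not part of this diff; a sketch of how such a check is typically written, assuming it keys off the named-pipe mode bit:

package main

import (
	"fmt"
	"os"
)

// isFileInfoPipe reports whether the file (here stdout) is a named pipe,
// i.e. the process output is being piped rather than shown on a terminal.
// This mirrors the assumed behavior of the cli package's helper.
func isFileInfoPipe(fi os.FileInfo) bool {
	return fi.Mode()&os.ModeNamedPipe != 0
}

func main() {
	stat, err := os.Stdout.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Println("piped:", isFileInfoPipe(stat)) // prints true under `go run . | cat`
}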
+ +package cli + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" +) + +func TestBackupImportCmd_WithNoArgument_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + setTestingAddresses(cfg) + + dbImportCmd := MakeBackupImportCommand(cfg) + err := dbImportCmd.ValidateArgs([]string{}) + require.ErrorIs(t, err, ErrInvalidArgumentLength) +} + +func TestBackupImportCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + + filepath := t.TempDir() + "/test.json" + + dbImportCmd := MakeBackupImportCommand(cfg) + err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestBackupImportCmd_WithNonExistantFile_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbImportCmd := MakeBackupImportCommand(cfg) + err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Failed to import data")) +} + +func TestBackupImportCmd_WithEmptyDatastore_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbImportCmd := MakeBackupImportCommand(cfg) + err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Failed to import data")) +} + +func TestBackupImportCmd_WithExistingCollection_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbImportCmd := MakeBackupImportCommand(cfg) + err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Successfully imported data from file")) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + key, err := client.NewDocKeyFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") + require.NoError(t, err) + doc, err := col.Get(ctx, key, false) + require.NoError(t, err) + + val, err := doc.Get("name") + require.NoError(t, err) + + require.Equal(t, "John", val.(string)) +} diff --git a/cli/blocks_get.go b/cli/blocks_get.go index 4223745fc4..c3519f99e7 100644 --- a/cli/blocks_get.go +++ b/cli/blocks_get.go @@ -24,7 +24,7 @@ import ( func MakeBlocksGetCommand(cfg *config.Config) *cobra.Command { var cmd = 
&cobra.Command{ Use: "get [CID]", - Short: "Get a block by its CID from the blockstore.", + Short: "Get a block by its CID from the blockstore", RunE: func(cmd *cobra.Command, args []string) (err error) { if len(args) != 1 { return NewErrMissingArg("CID") diff --git a/cli/cli.go b/cli/cli.go index 615ed1c208..707adbab7c 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -57,7 +57,10 @@ func NewDefraCommand(cfg *config.Config) DefraCommand { rpcCmd := MakeRPCCommand(cfg) blocksCmd := MakeBlocksCommand() schemaCmd := MakeSchemaCommand() + schemaMigrationCmd := MakeSchemaMigrationCommand() + indexCmd := MakeIndexCommand() clientCmd := MakeClientCommand() + backupCmd := MakeBackupCommand() rpcReplicatorCmd := MakeReplicatorCommand() p2pCollectionCmd := MakeP2PCollectionCommand() p2pCollectionCmd.AddCommand( @@ -77,9 +80,24 @@ func NewDefraCommand(cfg *config.Config) DefraCommand { blocksCmd.AddCommand( MakeBlocksGetCommand(cfg), ) + schemaMigrationCmd.AddCommand( + MakeSchemaMigrationSetCommand(cfg), + MakeSchemaMigrationGetCommand(cfg), + ) schemaCmd.AddCommand( MakeSchemaAddCommand(cfg), + MakeSchemaListCommand(cfg), MakeSchemaPatchCommand(cfg), + schemaMigrationCmd, + ) + indexCmd.AddCommand( + MakeIndexCreateCommand(cfg), + MakeIndexDropCommand(cfg), + MakeIndexListCommand(cfg), + ) + backupCmd.AddCommand( + MakeBackupExportCommand(cfg), + MakeBackupImportCommand(cfg), ) clientCmd.AddCommand( MakeDumpCommand(cfg), @@ -87,8 +105,10 @@ func NewDefraCommand(cfg *config.Config) DefraCommand { MakeRequestCommand(cfg), MakePeerIDCommand(cfg), schemaCmd, + indexCmd, rpcCmd, blocksCmd, + backupCmd, ) rootCmd.AddCommand( clientCmd, diff --git a/cli/client.go b/cli/client.go index 1e6ba43ae5..2456df8d43 100644 --- a/cli/client.go +++ b/cli/client.go @@ -17,9 +17,9 @@ import ( func MakeClientCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "client", - Short: "Interact with a running DefraDB node as a client", - Long: `Interact with a running DefraDB node as a client. -Execute queries, add schema types, and run debug routines.`, + Short: "Interact with a DefraDB node", + Long: `Interact with a DefraDB node. 
+Execute queries, add schema types, obtain node info, etc.`, } return cmd diff --git a/cli/dump.go b/cli/dump.go index a23d160e7e..f35e9232b1 100644 --- a/cli/dump.go +++ b/cli/dump.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -27,7 +26,7 @@ import ( func MakeDumpCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "dump", - Short: "Dump the contents of a database node-side", + Short: "Dump the contents of DefraDB node-side", RunE: func(cmd *cobra.Command, _ []string) (err error) { stdout, err := os.Stdout.Stat() if err != nil { @@ -49,7 +48,7 @@ func MakeDumpCommand(cfg *config.Config) *cobra.Command { defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/errors.go b/cli/errors.go index be0b75ff5f..17e4819a8b 100644 --- a/cli/errors.go +++ b/cli/errors.go @@ -10,7 +10,11 @@ package cli -import "github.com/sourcenetwork/defradb/errors" +import ( + "strings" + + "github.com/sourcenetwork/defradb/errors" +) const ( errMissingArg string = "missing argument" @@ -25,10 +29,14 @@ const ( errFailedToJoinEndpoint string = "failed to join endpoint" errFailedToSendRequest string = "failed to send request" errFailedToReadResponseBody string = "failed to read response body" + errFailedToCloseResponseBody string = "failed to close response body" errFailedToStatStdOut string = "failed to stat stdout" errFailedToHandleGQLErrors string = "failed to handle GraphQL errors" errFailedToPrettyPrintResponse string = "failed to pretty print response" errFailedToUnmarshalResponse string = "failed to unmarshal response" + errFailedParsePeerID string = "failed to parse PeerID" + errFailedToMarshalData string = "failed to marshal data" + errInvalidArgumentLength string = "invalid argument length" ) // Errors returnable from this package. 
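The errFailedToCloseResponseBody message above pairs with the NewErrFailedToCloseResponseBody constructor in the next hunk. It is written for named return values, so a failing Body.Close() in a defer can overwrite the error already being returned while still preserving it. The shape every command in this PR uses, reduced to a compilable sketch with the error constructor stubbed out:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// Stub standing in for cli.NewErrFailedToCloseResponseBody.
func newErrFailedToCloseResponseBody(closeErr, other error) error {
	return fmt.Errorf("failed to close response body: %w (other error: %v)", closeErr, other)
}

// fetch uses a named return so the deferred close can report its failure.
func fetch(url string) (err error) {
	res, err := http.Get(url)
	if err != nil {
		return err
	}
	defer func() {
		// Preserve both the close error and any earlier error.
		if e := res.Body.Close(); e != nil {
			err = newErrFailedToCloseResponseBody(e, err)
		}
	}()
	_, err = io.ReadAll(res.Body)
	return err
}

func main() {
	fmt.Println(fetch("http://localhost:9181/api/v0/ping"))
}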
@@ -43,7 +51,7 @@ var ( ErrEmptyStdin = errors.New(errEmptyStdin) ErrFailedToReadFile = errors.New(errFailedToReadFile) ErrFailedToReadStdin = errors.New(errFailedToReadStdin) - ErrFailToWrapRPCClient = errors.New(errFailedToCreateRPCClient) + ErrFailedToCreateRPCClient = errors.New(errFailedToCreateRPCClient) ErrFailedToAddReplicator = errors.New(errFailedToAddReplicator) ErrFailedToJoinEndpoint = errors.New(errFailedToJoinEndpoint) ErrFailedToSendRequest = errors.New(errFailedToSendRequest) @@ -52,14 +60,21 @@ var ( ErrFailedToHandleGQLErrors = errors.New(errFailedToHandleGQLErrors) ErrFailedToPrettyPrintResponse = errors.New(errFailedToPrettyPrintResponse) ErrFailedToUnmarshalResponse = errors.New(errFailedToUnmarshalResponse) + ErrFailedParsePeerID = errors.New(errFailedParsePeerID) + ErrInvalidExportFormat = errors.New("invalid export format") + ErrInvalidArgumentLength = errors.New(errInvalidArgumentLength) ) func NewErrMissingArg(name string) error { return errors.New(errMissingArg, errors.NewKV("Name", name)) } -func NewErrMissingArgs(count int, provided int) error { - return errors.New(errMissingArgs, errors.NewKV("Required", count), errors.NewKV("Provided", provided)) +func NewErrMissingArgs(names []string) error { + return errors.New(errMissingArgs, errors.NewKV("Required", strings.Join(names, ", "))) +} + +func NewErrTooManyArgs(max, actual int) error { + return errors.New(errTooManyArgs, errors.NewKV("Max", max), errors.NewKV("Actual", actual)) } func NewFailedToReadFile(inner error) error { @@ -90,6 +105,13 @@ func NewErrFailedToReadResponseBody(inner error) error { return errors.Wrap(errFailedToReadResponseBody, inner) } +func NewErrFailedToCloseResponseBody(closeErr, other error) error { + if other != nil { + return errors.Wrap(errFailedToCloseResponseBody, closeErr, errors.NewKV("Other error", other)) + } + return errors.Wrap(errFailedToCloseResponseBody, closeErr) +} + func NewErrFailedToStatStdOut(inner error) error { return errors.Wrap(errFailedToStatStdOut, inner) } @@ -105,3 +127,17 @@ func NewErrFailedToPrettyPrintResponse(inner error) error { func NewErrFailedToUnmarshalResponse(inner error) error { return errors.Wrap(errFailedToUnmarshalResponse, inner) } + +func NewErrFailedParsePeerID(inner error) error { + return errors.Wrap(errFailedParsePeerID, inner) +} + +// NewFailedToMarshalData returns an error indicating that there was a problem with marshalling. +func NewFailedToMarshalData(inner error) error { + return errors.Wrap(errFailedToMarshalData, inner) +} + +// NewErrInvalidArgumentLength returns an error indicating an incorrect number of arguments. +func NewErrInvalidArgumentLength(inner error, expected int) error { + return errors.Wrap(errInvalidArgumentLength, inner, errors.NewKV("Expected", expected)) +} diff --git a/cli/index.go b/cli/index.go new file mode 100644 index 0000000000..a7343ddce2 --- /dev/null +++ b/cli/index.go @@ -0,0 +1,25 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
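NewErrInvalidArgumentLength above pairs with cobra's positional-argument validators: the backup commands wrap cobra.ExactArgs(1) so the CLI surfaces a typed error instead of cobra's generic message. Reduced to a standalone sketch with the error type simplified to a plain wrapped error:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo <path>",
		// Wrap cobra's validator so the caller gets a typed/wrapped error,
		// as the backup commands do with NewErrInvalidArgumentLength(err, 1).
		Args: func(cmd *cobra.Command, args []string) error {
			if err := cobra.ExactArgs(1)(cmd, args); err != nil {
				return fmt.Errorf("invalid argument length (expected 1): %w", err)
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.SetArgs([]string{}) // no positional args, so the validator fires
	fmt.Println(cmd.Execute())
}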
+ +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeIndexCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "index", + Short: "Manage collections' indexes of a running DefraDB instance", + Long: `Manage (create, drop, or list) collection indexes on a DefraDB node.`, + } + + return cmd +} diff --git a/cli/index_create.go b/cli/index_create.go new file mode 100644 index 0000000000..a91a76d2d0 --- /dev/null +++ b/cli/index_create.go @@ -0,0 +1,125 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +type indexCreateResponse struct { + Data struct { + Index client.IndexDescription `json:"index"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeIndexCreateCommand(cfg *config.Config) *cobra.Command { + var collectionArg string + var nameArg string + var fieldsArg string + var cmd = &cobra.Command{ + Use: "create -c --collection <collection_name> --fields <fields> [-n --name <index_name>]", + Short: "Creates a secondary index on a collection's field(s)", + Long: `Creates a secondary index on a collection's field(s). + +The --name flag is optional. If not provided, a name will be generated automatically.
+ +Example: create an index for 'Users' collection on 'name' field: + defradb client index create --collection Users --fields name + +Example: create a named index for 'Users' collection on 'name' field: + defradb client index create --collection Users --fields name --name UsersByName`, + ValidArgs: []string{"collection", "fields", "name"}, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if collectionArg == "" || fieldsArg == "" { + if collectionArg == "" { + return NewErrMissingArg("collection") + } else { + return NewErrMissingArg("fields") + } + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + data := map[string]string{ + "collection": collectionArg, + "fields": fieldsArg, + } + if nameArg != "" { + data["name"] = nameArg + } + + jsonData, err := json.Marshal(data) + if err != nil { + return err + } + + res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(jsonData)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexCreateResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to create index", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully created index", + logging.NewKV("Index", r.Data.Index)) + } + } + return nil + }, + } + cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") + cmd.Flags().StringVarP(&nameArg, "name", "n", "", "Index name") + cmd.Flags().StringVar(&fieldsArg, "fields", "", "Fields to index") + + return cmd +} diff --git a/cli/index_create_test.go b/cli/index_create_test.go new file mode 100644 index 0000000000..7032abbb2f --- /dev/null +++ b/cli/index_create_test.go @@ -0,0 +1,244 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
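On the wire, the create command above is a single POST of {collection, fields, name} to IndexPath (/api/v0/index, per the router constants earlier in this diff). An equivalent standalone call, assuming a node on the default API address localhost:9181:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Same JSON body the command marshals from its flags.
	payload := []byte(`{"collection": "User", "fields": "name", "name": "users_name_index"}`)
	res, err := http.Post("http://localhost:9181/api/v0/index", "application/json", bytes.NewBuffer(payload))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(body)) // {"data":{"index":{...}}} on success
}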
+ +package cli + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" + +func getTestConfig(t *testing.T) *config.Config { + cfg := config.DefaultConfig() + dir := t.TempDir() + cfg.Datastore.Store = "memory" + cfg.Datastore.Badger.Path = dir + cfg.Net.P2PDisabled = false + cfg.Net.P2PAddress = randomMultiaddr + cfg.Net.RPCAddress = "0.0.0.0:0" + cfg.Net.TCPAddress = randomMultiaddr + return cfg +} + +func startTestNode(t *testing.T) (*config.Config, *defraInstance, func()) { + cfg := getTestConfig(t) + setTestingAddresses(cfg) + + ctx := context.Background() + di, err := start(ctx, cfg) + require.NoError(t, err) + return cfg, di, func() { di.close(ctx) } +} + +func parseLines(r io.Reader) ([]map[string]any, error) { + fileScanner := bufio.NewScanner(r) + + fileScanner.Split(bufio.ScanLines) + + logLines := []map[string]any{} + for fileScanner.Scan() { + loggedLine := make(map[string]any) + err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) + if err != nil { + return nil, err + } + logLines = append(logLines, loggedLine) + } + + return logLines, nil +} + +func lineHas(lines []map[string]any, key, value string) bool { + for _, line := range lines { + if line[key] == value { + return true + } + } + return false +} + +func simulateConsoleOutput(t *testing.T) (*bytes.Buffer, func()) { + b := &bytes.Buffer{} + log.ApplyConfig(logging.Config{ + EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), + Pipe: b, + }) + + f, err := os.CreateTemp(t.TempDir(), "tmpFile") + require.NoError(t, err) + originalStdout := os.Stdout + os.Stdout = f + + return b, func() { + os.Stdout = originalStdout + f.Close() + os.Remove(f.Name()) + } +} + +func execAddSchemaCmd(t *testing.T, cfg *config.Config, schema string) { + addSchemaCmd := MakeSchemaAddCommand(cfg) + err := addSchemaCmd.RunE(addSchemaCmd, []string{schema}) + require.NoError(t, err) +} + +func execCreateIndexCmd(t *testing.T, cfg *config.Config, collection, fields, name string) { + indexCreateCmd := MakeIndexCreateCommand(cfg) + indexCreateCmd.SetArgs([]string{ + "--collection", collection, + "--fields", fields, + "--name", name, + }) + err := indexCreateCmd.Execute() + require.NoError(t, err) +} + +func hasLogWithKey(logLines []map[string]any, key string) bool { + for _, logLine := range logLines { + if _, ok := logLine[key]; ok { + return true + } + } + return false +} + +func TestIndexCreateCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + indexCreateCmd := MakeIndexCreateCommand(cfg) + + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + err := indexCreateCmd.Execute() + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestIndexCreateCmd_IfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexCreateCmd := MakeIndexCreateCommand(cfg) + + outputBuf := bytes.NewBufferString("") + indexCreateCmd.SetOut(outputBuf) + + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + err := indexCreateCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = 
json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasErrors := r["errors"] + assert.True(t, hasErrors, "command should return error") +} + +func TestIndexCreateCmd_IfNoErrors_ReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + + indexCreateCmd := MakeIndexCreateCommand(cfg) + outputBuf := bytes.NewBufferString("") + indexCreateCmd.SetOut(outputBuf) + + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "name", + "--name", "users_name_index", + }) + err := indexCreateCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasData := r["data"] + assert.True(t, hasData, "command should return data") +} + +func TestIndexCreateCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexCreateCmd := MakeIndexCreateCommand(cfg) + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexCreateCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + assert.True(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexCreateCmd_WithConsoleOutputIfNoErrors_ReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + + const indexName = "users_name_index" + indexCreateCmd := MakeIndexCreateCommand(cfg) + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "name", + "--name", indexName, + }) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexCreateCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + result, ok := logLines[0]["Index"].(map[string]any) + require.True(t, ok) + assert.Equal(t, indexName, result["Name"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} diff --git a/cli/index_drop.go b/cli/index_drop.go new file mode 100644 index 0000000000..ef0a37db0c --- /dev/null +++ b/cli/index_drop.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +type indexDropResponse struct { + Data struct { + Result string `json:"result"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeIndexDropCommand(cfg *config.Config) *cobra.Command { + var collectionArg string + var nameArg string + var cmd = &cobra.Command{ + Use: "drop -c --collection <collection_name> -n --name <index_name>", + Short: "Drop a collection's secondary index", + Long: `Drop a collection's secondary index.
+ +Example: drop the index 'UsersByName' for the 'Users' collection: + defradb client index drop --collection Users --name UsersByName`, + ValidArgs: []string{"collection", "name"}, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if collectionArg == "" || nameArg == "" { + if collectionArg == "" { + return NewErrMissingArg("collection") + } else { + return NewErrMissingArg("name") + } + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + data := map[string]string{ + "collection": collectionArg, + "name": nameArg, + } + + jsonData, err := json.Marshal(data) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", endpoint.String(), bytes.NewBuffer(jsonData)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + req.Header.Add("Content-Type", "application/json") + client := &http.Client{} + res, err := client.Do(req) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexDropResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to drop index", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully dropped index", + logging.NewKV("Result", r.Data.Result)) + } + } + return nil + }, + } + cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") + cmd.Flags().StringVarP(&nameArg, "name", "n", "", "Index name") + + return cmd +} diff --git a/cli/index_drop_test.go b/cli/index_drop_test.go new file mode 100644 index 0000000000..7fa368a458 --- /dev/null +++ b/cli/index_drop_test.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
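Worth noting on the drop handler above: net/http has no http.Delete convenience function, which is why the command builds the request by hand and sets the Content-Type header itself. The same DELETE-with-body call as a standalone sketch, assuming a node on the default API address:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	payload := []byte(`{"collection": "User", "name": "users_name_index"}`)
	// http.NewRequest is required here: the stdlib has Post/Get helpers but no Delete.
	req, err := http.NewRequest(http.MethodDelete, "http://localhost:9181/api/v0/index", bytes.NewBuffer(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Add("Content-Type", "application/json")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(body))
}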
+ +package cli + +import ( + "bytes" + "encoding/json" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIndexDropCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + indexDropCmd := MakeIndexDropCommand(cfg) + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestIndexDropCmd_IfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexDropCmd := MakeIndexDropCommand(cfg) + + outputBuf := bytes.NewBufferString("") + indexDropCmd.SetOut(outputBuf) + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasErrors := r["errors"] + assert.True(t, hasErrors, "command should return error") +} + +func TestIndexDropCmd_IfNoErrors_ShouldReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") + + indexDropCmd := MakeIndexDropCommand(cfg) + outputBuf := bytes.NewBufferString("") + indexDropCmd.SetOut(outputBuf) + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasData := r["data"] + assert.True(t, hasData, "command should return data") +} + +func TestIndexDropCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexDropCmd := MakeIndexDropCommand(cfg) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + assert.True(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexDropCmd_WithConsoleOutputIfNoErrors_ShouldReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") + + indexDropCmd := MakeIndexDropCommand(cfg) + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexDropCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + assert.Equal(t, "success", logLines[0]["Result"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} diff --git a/cli/index_list.go b/cli/index_list.go new file mode 100644 index 0000000000..131782cfe5 --- /dev/null +++ b/cli/index_list.go @@ -0,0 +1,110 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +type indexListResponse struct { + Data struct { + Collections map[string][]client.IndexDescription `json:"collections"` + Indexes []client.IndexDescription `json:"indexes"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeIndexListCommand(cfg *config.Config) *cobra.Command { + var collectionArg string + var cmd = &cobra.Command{ + Use: "list [-c --collection <collection_name>]", + Short: "Show the list of indexes in the database or for a specific collection", + Long: `Show the list of indexes in the database or for a specific collection. + +If the --collection flag is provided, only the indexes for that collection will be shown. +Otherwise, all indexes in the database will be shown. + +Example: show all indexes for the 'Users' collection: + defradb client index list --collection Users`, + ValidArgs: []string{"collection"}, + RunE: func(cmd *cobra.Command, args []string) (err error) { + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + if collectionArg != "" { + values := url.Values{ + "collection": {collectionArg}, + } + endpoint.RawQuery = values.Encode() + } + + res, err := http.Get(endpoint.String()) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexListResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to list indexes", + logging.NewKV("Errors", r.Errors)) + } else if collectionArg != "" { + log.FeedbackInfo(cmd.Context(), "Fetched indexes for collection "+collectionArg, + logging.NewKV("Indexes", r.Data.Indexes)) + } else { + log.FeedbackInfo(cmd.Context(), "Fetched all indexes", + logging.NewKV("Collections", r.Data.Collections)) + } + } + return nil + }, + } + cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") + + return cmd +} diff --git a/cli/index_list_test.go b/cli/index_list_test.go new file mode 100644 index 0000000000..548d2af040 --- /dev/null +++ b/cli/index_list_test.go @@ -0,0 +1,145 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
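Unlike create and drop, the list endpoint above takes its optional collection filter as a query parameter rather than a JSON body, encoded with url.Values exactly as in the RunE. A standalone equivalent, assuming a node on the default API address:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	u, err := url.Parse("http://localhost:9181/api/v0/index")
	if err != nil {
		panic(err)
	}
	// Same encoding as the command: yields /api/v0/index?collection=User.
	u.RawQuery = url.Values{"collection": {"User"}}.Encode()
	res, err := http.Get(u.String())
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(string(body)) // {"data":{"indexes":[...]}} for a filtered list
}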
+ +package cli + +import ( + "bytes" + "encoding/json" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIndexListCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + indexCreateCmd := MakeIndexListCommand(cfg) + + err := indexCreateCmd.RunE(indexCreateCmd, nil) + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestIndexListCmd_IfNoErrors_ShouldReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") + + indexListCmd := MakeIndexListCommand(cfg) + outputBuf := bytes.NewBufferString("") + indexListCmd.SetOut(outputBuf) + + err := indexListCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasData := r["data"] + assert.True(t, hasData, "command should return data") +} + +func TestIndexListCmd_WithConsoleOutputIfCollectionDoesNotExist_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + indexListCmd := MakeIndexListCommand(cfg) + indexListCmd.SetArgs([]string{"--collection", "User"}) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexListCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexListCmd_WithConsoleOutputIfCollectionIsGiven_ReturnCollectionList(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + const indexName = "users_name_index" + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", indexName) + + indexListCmd := MakeIndexListCommand(cfg) + indexListCmd.SetArgs([]string{"--collection", "User"}) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexListCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + resultList, ok := logLines[0]["Indexes"].([]any) + require.True(t, ok) + require.Len(t, resultList, 1) + result, ok := resultList[0].(map[string]any) + require.True(t, ok) + assert.Equal(t, indexName, result["Name"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexListCmd_WithConsoleOutputIfNoArgs_ReturnAllIndexes(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + const userIndexName = "users_name_index" + const productIndexName = "product_price_index" + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execAddSchemaCmd(t, cfg, `type Product { price: Int }`) + execCreateIndexCmd(t, cfg, "User", "name", userIndexName) + execCreateIndexCmd(t, cfg, "Product", "price", productIndexName) + + indexListCmd := MakeIndexListCommand(cfg) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexListCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + resultCollections, ok := logLines[0]["Collections"].(map[string]any) + require.True(t, ok) + + userCollection, ok := resultCollections["User"].([]any) + require.True(t, ok) + require.Len(t, userCollection, 1) + userIndex, ok := 
userCollection[0].(map[string]any) + require.True(t, ok) + require.Equal(t, userIndexName, userIndex["Name"]) + + productCollection, ok := resultCollections["Product"].([]any) + require.True(t, ok) + require.Len(t, productCollection, 1) + productIndex, ok := productCollection[0].(map[string]any) + require.True(t, ok) + require.Equal(t, productIndexName, productIndex["Name"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} diff --git a/cli/init.go b/cli/init.go index 9d188509bd..f9af1850b7 100644 --- a/cli/init.go +++ b/cli/init.go @@ -32,7 +32,8 @@ func MakeInitCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "init", Short: "Initialize DefraDB's root directory and configuration file", - Long: "Initialize a directory for configuration and data at the given path.", + Long: `Initialize a directory for configuration and data at the given path. +Passed flags will be persisted in the stored configuration.`, // Load a default configuration, considering env. variables and CLI flags. PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { if err := cfg.LoadWithRootdir(false); err != nil { diff --git a/cli/p2p_collection.go b/cli/p2p_collection.go index 143820d4d8..6ce6d8e7c7 100644 --- a/cli/p2p_collection.go +++ b/cli/p2p_collection.go @@ -17,8 +17,9 @@ import ( func MakeP2PCollectionCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "p2pcollection", - Short: "Interact with the P2P collection system", - Long: "Add, delete, or get the list of P2P collections", + Short: "Configure the P2P collection system", + Long: `Add, delete, or get the list of P2P collections. +The selected collections synchronize their events on the pubsub network.`, } return cmd } diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index d0fc18b6db..46a4f171e1 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -27,7 +27,8 @@ func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "add [collectionID]", Short: "Add P2P collections", - Long: `Use this command if you wish to add new P2P collections to the pubsub topics`, + Long: `Add P2P collections to the synchronized pubsub topics. +The collections are synchronized between nodes of a pubsub network.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { return errors.New("must specify at least one collectionID") @@ -38,7 +39,7 @@ func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -51,9 +52,9 @@ func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { err = client.AddP2PCollections(ctx, args...) 
if err != nil { - return errors.Wrap("failed to add p2p collections, request failed", err) + return errors.Wrap("failed to add P2P collections, request failed", err) } - log.FeedbackInfo(ctx, "Successfully added p2p collections", logging.NewKV("Collections", args)) + log.FeedbackInfo(ctx, "Successfully added P2P collections", logging.NewKV("Collections", args)) return nil }, } diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go index 7e34339e0d..cb9c9f4025 100644 --- a/cli/p2p_collection_getall.go +++ b/cli/p2p_collection_getall.go @@ -27,7 +27,8 @@ func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "getall", Short: "Get all P2P collections", - Long: `Use this command if you wish to get all P2P collections in the pubsub topics`, + Long: `Get all P2P collections in the pubsub topics. +This is the list of collections of the node that are synchronized on the pubsub network.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.NoArgs(cmd, args); err != nil { return errors.New("must specify no argument") @@ -38,7 +39,7 @@ func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -51,7 +52,7 @@ func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { collections, err := client.GetAllP2PCollections(ctx) if err != nil { - return errors.Wrap("failed to add p2p collections, request failed", err) + return errors.Wrap("failed to add P2P collections, request failed", err) } if len(collections) > 0 { diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index ad79a86d1a..66dbd5fa16 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -26,8 +26,9 @@ import ( func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "remove [collectionID]", - Short: "Add P2P collections", - Long: `Use this command if you wish to remove P2P collections from the pubsub topics`, + Short: "Remove P2P collections", + Long: `Remove P2P collections from the followed pubsub topics. +The removed collections will no longer be synchronized between nodes.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { return errors.New("must specify at least one collectionID") @@ -38,7 +39,7 @@ func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -51,9 +52,9 @@ func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { err = client.RemoveP2PCollections(ctx, args...) 
if err != nil { - return errors.Wrap("failed to remove p2p collections, request failed", err) + return errors.Wrap("failed to remove P2P collections, request failed", err) } - log.FeedbackInfo(ctx, "Successfully removed p2p collections", logging.NewKV("Collections", args)) + log.FeedbackInfo(ctx, "Successfully removed P2P collections", logging.NewKV("Collections", args)) return nil }, } diff --git a/cli/peerid.go b/cli/peerid.go index 27559c2302..a3d269fb2d 100644 --- a/cli/peerid.go +++ b/cli/peerid.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -27,7 +26,8 @@ import ( func MakePeerIDCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "peerid", - Short: "Get the PeerID of the DefraDB node", + Short: "Get the PeerID of the node", + Long: `Get the PeerID of the node.`, RunE: func(cmd *cobra.Command, _ []string) (err error) { stdout, err := os.Stdout.Stat() if err != nil { @@ -49,7 +49,7 @@ func MakePeerIDCommand(cfg *config.Config) *cobra.Command { defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/ping.go b/cli/ping.go index 11ca129850..210847dfcc 100644 --- a/cli/ping.go +++ b/cli/ping.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -27,7 +26,7 @@ import ( func MakePingCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "ping", - Short: "Ping to test connection to a node", + Short: "Ping to test connection with a node", RunE: func(cmd *cobra.Command, _ []string) (err error) { stdout, err := os.Stdout.Stat() if err != nil { @@ -49,7 +48,7 @@ func MakePingCommand(cfg *config.Config) *cobra.Command { defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/replicator.go b/cli/replicator.go index fb6946ac29..c7956c80a6 100644 --- a/cli/replicator.go +++ b/cli/replicator.go @@ -17,8 +17,9 @@ import ( func MakeReplicatorCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "replicator", - Short: "Interact with the replicator system", - Long: "Add, delete, or get the list of persisted replicators", + Short: "Configure the replicator system", + Long: `Configure the replicator system. Add, delete, or get the list of persisted replicators. +A replicator replicates one or all collection(s) from one node to another.`, } return cmd } diff --git a/cli/replicator_delete.go b/cli/replicator_delete.go index 0bd6e0374b..eb7e580f12 100644 --- a/cli/replicator_delete.go +++ b/cli/replicator_delete.go @@ -31,9 +31,8 @@ func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { ) var cmd = &cobra.Command{ Use: "delete [-f, --full | -c, --collection] ", - Short: "Delete a replicator", - Long: `Use this command if you wish to remove the target replicator - for the p2p data sync system.`, + Short: "Delete a replicator. It will stop synchronizing", + Long: `Delete a replicator. 
It will stop synchronizing.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.ExactArgs(1)(cmd, args); err != nil { return errors.New("must specify one argument: PeerID") @@ -50,7 +49,7 @@ func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -63,7 +62,7 @@ func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { pid, err := peer.Decode(pidString) if err != nil { - return errors.Wrap("failed to parse PeerID from string", err) + return NewErrFailedParsePeerID(err) } err = client.DeleteReplicator(ctx, pid) diff --git a/cli/replicator_getall.go b/cli/replicator_getall.go index 0c03b34e3a..63cd6533ba 100644 --- a/cli/replicator_getall.go +++ b/cli/replicator_getall.go @@ -27,7 +27,8 @@ func MakeReplicatorGetallCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "getall", Short: "Get all replicators", - Long: `Use this command if you wish to get all the replicators for the p2p data sync system.`, + Long: `Get all the replicators active in the P2P data sync system. +These are the replicators that are currently replicating data from one node to another.`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 0 { if err := cmd.Usage(); err != nil { diff --git a/cli/replicator_set.go b/cli/replicator_set.go index 377761a9f2..acb70d0cfd 100644 --- a/cli/replicator_set.go +++ b/cli/replicator_set.go @@ -32,8 +32,9 @@ func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "set [-f, --full | -c, --collection] ", Short: "Set a P2P replicator", - Long: `Use this command if you wish to add a new target replicator - for the p2p data sync system or add schemas to an existing one`, + Long: `Add a new target replicator. +A replicator replicates one or all collection(s) from this node to another. 
+`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.ExactArgs(1)(cmd, args); err != nil { return errors.New("must specify one argument: peer") @@ -43,7 +44,7 @@ func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { peerAddr, err := ma.NewMultiaddr(args[0]) if err != nil { - return errors.Wrap("could not parse peer address", err) + return NewErrFailedParsePeerID(err) } if len(col) == 0 && !fullRep { return errors.New("must run with either --full or --collection") @@ -52,7 +53,7 @@ func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() diff --git a/cli/request.go b/cli/request.go index b0c85a854b..1b8f86ced8 100644 --- a/cli/request.go +++ b/cli/request.go @@ -11,7 +11,6 @@ package cli import ( - "fmt" "io" "net/http" "net/url" @@ -109,7 +108,7 @@ To learn more about the DefraDB GraphQL Query Language, refer to https://docs.so defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/root.go b/cli/root.go index 941cc8b882..e639cde785 100644 --- a/cli/root.go +++ b/cli/root.go @@ -25,10 +25,7 @@ func MakeRootCommand(cfg *config.Config) *cobra.Command { Short: "DefraDB Edge Database", Long: `DefraDB is the edge database to power the user-centric future. -Start a database node, issue a request to a local or remote node, and much more. - -DefraDB is released under the BSL license, (c) 2022 Democratized Data Foundation. -See https://docs.source.network/BSL.txt for more information. +Start a DefraDB node, interact with a local or remote node, and much more. `, // Runs on subcommands before their Run function, to handle configuration and top-level flags. // Loads the rootDir containing the configuration file, otherwise warn about it and load a default configuration. 
diff --git a/cli/rpc.go b/cli/rpc.go index 02caa055fb..afb1a007e2 100644 --- a/cli/rpc.go +++ b/cli/rpc.go @@ -21,12 +21,12 @@ import ( func MakeRPCCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "rpc", - Short: "Interact with a DefraDB gRPC server", - Long: "Interact with a DefraDB gRPC server.", + Short: "Interact with a DefraDB node via RPC", + Long: "Interact with a DefraDB node via RPC.", } cmd.PersistentFlags().String( "addr", cfg.Net.RPCAddress, - "gRPC endpoint address", + "RPC endpoint address", ) if err := cfg.BindFlag("net.rpcaddress", cmd.PersistentFlags().Lookup("addr")); err != nil { diff --git a/cli/schema.go b/cli/schema.go index dc96539c71..9316768316 100644 --- a/cli/schema.go +++ b/cli/schema.go @@ -17,8 +17,8 @@ import ( func MakeSchemaCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "schema", - Short: "Interact with the schema system of a running DefraDB instance", - Long: `Make changes, updates, or look for existing schema types to a DefraDB node.`, + Short: "Interact with the schema system of a DefraDB node", + Long: `Make changes, updates, or look for existing schema types.`, } return cmd diff --git a/cli/schema_add.go b/cli/schema_add.go index 4fc916567f..b5f28f15d3 100644 --- a/cli/schema_add.go +++ b/cli/schema_add.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -30,8 +29,8 @@ func MakeSchemaAddCommand(cfg *config.Config) *cobra.Command { var schemaFile string var cmd = &cobra.Command{ Use: "add [schema]", - Short: "Add a new schema type to DefraDB", - Long: `Add a new schema type to DefraDB. + Short: "Add new schema", + Long: `Add new schema. Example: add from an argument string: defradb client schema add 'type Foo { ... }' @@ -94,7 +93,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw return errors.New("empty schema provided") } - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaLoadPath) + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) if err != nil { return errors.Wrap("join paths failed", err) } @@ -106,7 +105,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/schema_list.go b/cli/schema_list.go new file mode 100644 index 0000000000..3a0e32bcce --- /dev/null +++ b/cli/schema_list.go @@ -0,0 +1,89 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
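The schema subcommands in this changeset converge on a single httpapi.SchemaPath endpoint and differ only in HTTP verb: `schema add` POSTs the SDL, the new `schema list` below GETs it, and `schema patch` now issues a PATCH. A minimal sketch of the add flow outside the CLI follows; the address, path, and content type are assumptions for illustration, not values taken from this changeset.

    package schemademo

    import (
        "fmt"
        "io"
        "net/http"
        "strings"
    )

    func addSchema() error {
        // Hypothetical SDL; any valid DefraDB schema type works here.
        schema := `type User { name: String age: Int }`

        // The CLI derives this URL from its config via httpapi.JoinPaths;
        // the host, port, and path prefix here are assumed.
        endpoint := "http://localhost:9181/api/v0/schema"

        res, err := http.Post(endpoint, "text/plain", strings.NewReader(schema))
        if err != nil {
            return err
        }
        defer res.Body.Close() //nolint:errcheck

        body, err := io.ReadAll(res.Body)
        if err != nil {
            return err
        }
        fmt.Println(string(body)) // raw JSON response from the node
        return nil
    }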
+ +package cli + +import ( + "encoding/json" + "io" + "net/http" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" +) + +type schemaListResponse struct { + Data struct { + Collections []struct { + Name string `json:"name"` + ID string `json:"id"` + VersionID string `json:"version_id"` + Fields []struct { + ID string `json:"id"` + Name string `json:"name"` + Kind string `json:"kind"` + Internal bool `json:"internal"` + } `json:"fields"` + } `json:"collections"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeSchemaListCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "list", + Short: "List schema types with their respective fields", + RunE: func(cmd *cobra.Command, args []string) (err error) { + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + res, err := http.Get(endpoint.String()) + if err != nil { + return NewErrFailedToSendRequest(err) + } + defer res.Body.Close() //nolint:errcheck + + data, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + var r schemaListResponse + if err := json.Unmarshal(data, &r); err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + return errors.New("failed to list schemas", errors.NewKV("errors", r.Errors)) + } + + for _, c := range r.Data.Collections { + cmd.Printf("# Schema ID: %s\n", c.ID) + cmd.Printf("# Version ID: %s\n", c.VersionID) + cmd.Printf("type %s {\n", c.Name) + for _, f := range c.Fields { + if !f.Internal { + cmd.Printf("\t%s: %s\n", f.Name, f.Kind) + } + } + cmd.Printf("}\n\n") + } + + return nil + }, + } + return cmd +} diff --git a/cli/schema_migration.go b/cli/schema_migration.go new file mode 100644 index 0000000000..7b37fdcabe --- /dev/null +++ b/cli/schema_migration.go @@ -0,0 +1,25 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeSchemaMigrationCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "migration", + Short: "Interact with the schema migration system of a running DefraDB instance", + Long: `Set new schema migrations or look for existing ones on a DefraDB node.`, + } + + return cmd +} diff --git a/cli/schema_migration_get.go b/cli/schema_migration_get.go new file mode 100644 index 0000000000..333c2d9cf4 --- /dev/null +++ b/cli/schema_migration_get.go @@ -0,0 +1,98 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
+ +package cli + +import ( + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" +) + +func MakeSchemaMigrationGetCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "get", + Short: "Gets the schema migrations within DefraDB", + Long: `Gets the schema migrations within the local DefraDB node. + +Example: + defradb client schema migration get + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err := cobra.NoArgs(cmd, args); err != nil { + return NewErrTooManyArgs(0, len(args)) + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) + if err != nil { + return errors.Wrap("join paths failed", err) + } + + res, err := http.Get(endpoint.String()) + if err != nil { + return errors.Wrap("failed to get schema migrations", err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return errors.Wrap("failed to read response body", err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return errors.Wrap("failed to stat stdout", err) + } + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + type migrationGetResponse struct { + Data struct { + Configuration []client.LensConfig `json:"configuration"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` + } + r := migrationGetResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to get schema migrations", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully got schema migrations", + logging.NewKV("Configuration", r.Data.Configuration)) + } + } + + return nil + }, + } + return cmd +} diff --git a/cli/schema_migration_set.go b/cli/schema_migration_set.go new file mode 100644 index 0000000000..633cbf0115 --- /dev/null +++ b/cli/schema_migration_set.go @@ -0,0 +1,178 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
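The `set` subcommand that follows takes its [cfg] argument as a JSON-encoded lens configuration. A minimal sketch of producing that JSON with the client.LensConfig type added later in this changeset; the version IDs, wasm path, and lens arguments are hypothetical, and the model.Lens field names are assumed from the lens-vm config package rather than taken from this diff.

    package migrationdemo

    import (
        "encoding/json"
        "fmt"

        "github.com/lens-vm/lens/host-go/config/model"

        "github.com/sourcenetwork/defradb/client"
    )

    func printLensCfg() error {
        cfg := client.LensConfig{
            SourceSchemaVersionID:      "bae123", // hypothetical source version
            DestinationSchemaVersionID: "bae456", // hypothetical destination version
            Lens: model.Lens{
                Lenses: []model.LensModule{
                    {
                        // Hypothetical wasm module; it must stay at this path
                        // for as long as the migration is active.
                        Path: "/path/to/rename_field.wasm",
                        Arguments: map[string]any{
                            "src": "name",
                            "dst": "fullName",
                        },
                    },
                },
            },
        }

        out, err := json.Marshal(cfg)
        if err != nil {
            return err
        }
        // Pass this string as the [cfg] argument of
        // `defradb client schema migration set`.
        fmt.Println(string(out))
        return nil
    }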
+ +package cli + +import ( + "encoding/json" + "io" + "net/http" + "os" + "strings" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" +) + +func MakeSchemaMigrationSetCommand(cfg *config.Config) *cobra.Command { + var lensFile string + var cmd = &cobra.Command{ + Use: "set [src] [dst] [cfg]", + Short: "Set a schema migration within DefraDB", + Long: `Set a migration between two schema versions within the local DefraDB node. + +Example: set from an argument string: + defradb client schema migration set bae123 bae456 '{"lenses": [...' + +Example: set from file: + defradb client schema migration set bae123 bae456 -f schema_migration.lens + +Example: set from stdin: + cat schema_migration.lens | defradb client schema migration set bae123 bae456 - + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err := cobra.MinimumNArgs(2)(cmd, args); err != nil { + return NewErrMissingArgs([]string{"src", "dst", "cfg"}) + } + if err := cobra.MaximumNArgs(3)(cmd, args); err != nil { + return NewErrTooManyArgs(3, len(args)) + } + + var lensCfgJson string + var srcSchemaVersionID string + var dstSchemaVersionID string + fi, err := os.Stdin.Stat() + if err != nil { + return err + } + + if lensFile != "" { + buf, err := os.ReadFile(lensFile) + if err != nil { + return errors.Wrap("failed to read schema file", err) + } + lensCfgJson = string(buf) + } else if len(args) == 2 { + // If the lensFile flag has not been provided then it must be provided as an arg + // and thus len(args) cannot be 2 + return NewErrMissingArg("cfg") + } else if isFileInfoPipe(fi) && args[2] != "-" { + log.FeedbackInfo( + cmd.Context(), + "Run 'defradb client schema migration set -' to read from stdin."+ + " Example: 'cat schema_migration.lens | defradb client schema migration set -'.", + ) + return nil + } else if args[2] == "-" { + stdin, err := readStdin() + if err != nil { + return errors.Wrap("failed to read stdin", err) + } + if len(stdin) == 0 { + return errors.New("no lens cfg in stdin provided") + } else { + lensCfgJson = stdin + } + } else { + lensCfgJson = args[2] + } + + srcSchemaVersionID = args[0] + dstSchemaVersionID = args[1] + + if lensCfgJson == "" { + return NewErrMissingArg("cfg") + } + if srcSchemaVersionID == "" { + return NewErrMissingArg("src") + } + if dstSchemaVersionID == "" { + return NewErrMissingArg("dst") + } + + decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) + decoder.DisallowUnknownFields() + + var lensCfg model.Lens + err = decoder.Decode(&lensCfg) + if err != nil { + return errors.Wrap("invalid lens configuration", err) + } + + migrationCfg := client.LensConfig{ + SourceSchemaVersionID: srcSchemaVersionID, + DestinationSchemaVersionID: dstSchemaVersionID, + Lens: lensCfg, + } + + migrationCfgJson, err := json.Marshal(migrationCfg) + if err != nil { + return errors.Wrap("failed to marshal cfg", err) + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) + if err != nil { + return errors.Wrap("join paths failed", err) + } + + res, err := http.Post(endpoint.String(), "application/json", strings.NewReader(string(migrationCfgJson))) + if err != nil { + return errors.Wrap("failed to post schema
migration", err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return errors.Wrap("failed to read response body", err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return errors.Wrap("failed to stat stdout", err) + } + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + type migrationSetResponse struct { + Errors []struct { + Message string `json:"message"` + } `json:"errors"` + } + r := migrationSetResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to set schema migration", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully set schema migration") + } + } + + return nil + }, + } + cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") + return cmd +} diff --git a/cli/schema_patch.go b/cli/schema_patch.go index 31ac830345..b1e962c51a 100644 --- a/cli/schema_patch.go +++ b/cli/schema_patch.go @@ -31,7 +31,7 @@ func MakeSchemaPatchCommand(cfg *config.Config) *cobra.Command { Short: "Patch an existing schema type", Long: `Patch an existing schema. -Uses JSON PATCH formatting as a DDL. +Uses JSON Patch to modify schema types. Example: patch from an argument string: defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' @@ -54,7 +54,7 @@ To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.s if err = cmd.Usage(); err != nil { return err } - return ErrTooManyArgs + return NewErrTooManyArgs(1, len(args)) } if patchFile != "" { @@ -95,12 +95,16 @@ To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.s return ErrEmptyFile } - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPatchPath) + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) if err != nil { return err } - res, err := http.Post(endpoint.String(), "text", strings.NewReader(patch)) + req, err := http.NewRequest(http.MethodPatch, endpoint.String(), strings.NewReader(patch)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + res, err := http.DefaultClient.Do(req) if err != nil { return NewErrFailedToSendRequest(err) } diff --git a/cli/start.go b/cli/start.go index b830a79b9a..5d571be46d 100644 --- a/cli/start.go +++ b/cli/start.go @@ -36,17 +36,16 @@ import ( "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" - netapi "github.com/sourcenetwork/defradb/net/api" - netpb "github.com/sourcenetwork/defradb/net/api/pb" + "github.com/sourcenetwork/defradb/net" + netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" - "github.com/sourcenetwork/defradb/node" ) func MakeStartCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "start", Short: "Start a DefraDB node", - Long: "Start a new instance of DefraDB node.", + Long: "Start a DefraDB node.", // Load the root config if it exists, otherwise create it. 
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { @@ -194,7 +193,7 @@ func MakeStartCommand(cfg *config.Config) *cobra.Command { } type defraInstance struct { - node *node.Node + node *net.Node db client.DB server *httpapi.Server } @@ -252,13 +251,13 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { } // init the p2p node - var n *node.Node + var n *net.Node if !cfg.Net.P2PDisabled { log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = node.NewNode( + n, err = net.NewNode( ctx, db, - cfg.NodeConfig(), + net.WithConfig(cfg), ) if err != nil { db.Close(ctx) @@ -315,11 +314,9 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", addr), err) } - netService := netapi.NewService(n.Peer) - go func() { log.FeedbackInfo(ctx, "Started RPC server", logging.NewKV("Address", addr)) - netpb.RegisterServiceServer(server, netService) + netpb.RegisterCollectionServer(server, n.Peer) if err := server.Serve(tcplistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { log.FeedbackFatalE(ctx, "Failed to start RPC server", err) } diff --git a/client/backup.go b/client/backup.go new file mode 100644 index 0000000000..58ccf1f9d2 --- /dev/null +++ b/client/backup.go @@ -0,0 +1,36 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +import ( + "context" +) + +// Backup contains DefraDB's supported backup operations. +type Backup interface { + // BasicImport imports a JSON dataset. + // filepath must be accessible to the node. + BasicImport(ctx context.Context, filepath string) error + // BasicExport exports the current data, or a subset of it, to a file in JSON format. + BasicExport(ctx context.Context, config *BackupConfig) error +} + +// BackupConfig holds the configuration parameters for database backups. +type BackupConfig struct { + // If a file already exists at this location, it will be truncated and overwritten. + Filepath string `json:"filepath"` + // Only JSON is supported at the moment. + Format string `json:"format"` + // Pretty print JSON. + Pretty bool `json:"pretty"` + // List of collection names selecting which collections to back up. + Collections []string `json:"collections"` +} diff --git a/client/collection.go b/client/collection.go index f59bf43d6b..9c91dccb7c 100644 --- a/client/collection.go +++ b/client/collection.go @@ -136,6 +136,19 @@ type Collection interface { // GetAllDocKeys returns all the document keys that exist in the collection. GetAllDocKeys(ctx context.Context) (<-chan DocKeysResult, error) + + // CreateIndex creates a new index on the collection. + // `IndexDescription` contains the description of the index to be created. + // `IndexDescription.Name` must start with a letter or an underscore and can + // only contain letters, numbers, and underscores. + // If the name of the index is not provided, it will be generated. + CreateIndex(context.Context, IndexDescription) (IndexDescription, error) + + // DropIndex drops an index from the collection.
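A minimal usage sketch for the Backup interface above, assuming an already-opened client.DB handle obtained elsewhere; the file path and collection names are hypothetical.

    package backupdemo

    import (
        "context"

        "github.com/sourcenetwork/defradb/client"
    )

    // backupAndRestore exports two collections to a pretty-printed JSON file,
    // then re-imports that same file into the node.
    func backupAndRestore(ctx context.Context, db client.DB) error {
        cfg := &client.BackupConfig{
            Filepath:    "/tmp/defra_backup.json", // hypothetical; must be accessible to the node
            Format:      "json",                   // only JSON is supported at the moment
            Pretty:      true,
            Collections: []string{"User", "Product"}, // hypothetical collection names
        }
        if err := db.BasicExport(ctx, cfg); err != nil {
            return err
        }
        return db.BasicImport(ctx, cfg.Filepath)
    }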
+ DropIndex(ctx context.Context, indexName string) error + + // GetIndexes returns all the indexes that exist on the collection. + GetIndexes(ctx context.Context) ([]IndexDescription, error) } // DocKeysResult wraps the result of an attempt at a DocKey retrieval operation. diff --git a/client/db.go b/client/db.go index 4ec69668cd..ba4dd0b89d 100644 --- a/client/db.go +++ b/client/db.go @@ -19,6 +19,8 @@ import ( "github.com/sourcenetwork/defradb/events" ) +type CollectionName = string + // DB is the primary public programmatic access point to the local DefraDB instance. // // It should be constructed via the [db] package, via the [db.NewDB] function. @@ -83,6 +85,9 @@ type Store interface { // P2P holds the P2P related methods that must be implemented by the database. P2P + // Backup holds the backup related methods that must be implemented by the database. + Backup + // AddSchema takes the provided GQL schema in SDL format, and applies it to the [Store], // creating the necessary collections, request types, etc. // @@ -106,10 +111,29 @@ type Store interface { // [FieldKindStringToEnumMapping]. PatchSchema(context.Context, string) error + // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to + // calling `LensRegistry().SetMigration(ctx, cfg)`. + // + // There may only be one migration per schema version id. If another migration was registered it will be + // overwritten by this migration. + // + // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. + // This is to allow the migration of documents of schema versions unknown to the local node received by the + // P2P system. + // + // Migrations will only run if there is a complete path from the document schema version to the latest local + // schema version. + SetMigration(context.Context, LensConfig) error + + // LensRegistry returns the LensRegistry in use by this database instance. + // + // It exposes several useful thread-safe migration related functions. + LensRegistry() LensRegistry + // GetCollectionByName attempts to retrieve a collection matching the given name. // // If no matching collection is found an error will be returned. - GetCollectionByName(context.Context, string) (Collection, error) + GetCollectionByName(context.Context, CollectionName) (Collection, error) // GetCollectionBySchemaID attempts to retrieve a collection matching the given schema ID. // @@ -125,6 +149,9 @@ type Store interface { // this [Store]. GetAllCollections(context.Context) ([]Collection, error) + // GetAllIndexes returns all the indexes that currently exist within this [Store]. + GetAllIndexes(context.Context) (map[CollectionName][]IndexDescription, error) + // ExecRequest executes the given GQL request against the [Store]. ExecRequest(context.Context, string) *RequestResult } diff --git a/client/descriptions.go b/client/descriptions.go index cd1d7fc53d..0b44f36b83 100644 --- a/client/descriptions.go +++ b/client/descriptions.go @@ -29,6 +29,9 @@ type CollectionDescription struct { // Schema contains the data type information that this Collection uses. Schema SchemaDescription + + // Indexes contains the secondary indexes that this Collection has. + Indexes []IndexDescription } // IDString returns the collection ID as a string. @@ -36,24 +39,12 @@ func (col CollectionDescription) IDString() string { return fmt.Sprint(col.ID) } -// GetField returns the field of the given name.
-func (col CollectionDescription) GetField(name string) (FieldDescription, bool) { - if !col.Schema.IsEmpty() { - for _, field := range col.Schema.Fields { - if field.Name == name { - return field, true - } - } - } - return FieldDescription{}, false -} - // GetFieldByID searches for a field with the given ID. If such a field is found it // will return it and true, if it is not found it will return false. -func (col CollectionDescription) GetFieldByID(id string) (FieldDescription, bool) { +func (col CollectionDescription) GetFieldByID(id FieldID) (FieldDescription, bool) { if !col.Schema.IsEmpty() { for _, field := range col.Schema.Fields { - if field.ID.String() == id { + if field.ID == id { return field, true } } @@ -115,9 +106,56 @@ func (sd SchemaDescription) GetFieldKey(fieldName string) uint32 { return uint32(0) } +// GetField returns the field of the given name. +func (sd SchemaDescription) GetField(name string) (FieldDescription, bool) { + if !sd.IsEmpty() { + for _, field := range sd.Fields { + if field.Name == name { + return field, true + } + } + } + return FieldDescription{}, false +} + // FieldKind describes the type of a field. type FieldKind uint8 +func (f FieldKind) String() string { + switch f { + case FieldKind_DocKey: + return "ID" + case FieldKind_BOOL: + return "Boolean" + case FieldKind_NILLABLE_BOOL_ARRAY: + return "[Boolean]" + case FieldKind_BOOL_ARRAY: + return "[Boolean!]" + case FieldKind_INT: + return "Int" + case FieldKind_NILLABLE_INT_ARRAY: + return "[Int]" + case FieldKind_INT_ARRAY: + return "[Int!]" + case FieldKind_DATETIME: + return "DateTime" + case FieldKind_FLOAT: + return "Float" + case FieldKind_NILLABLE_FLOAT_ARRAY: + return "[Float]" + case FieldKind_FLOAT_ARRAY: + return "[Float!]" + case FieldKind_STRING: + return "String" + case FieldKind_NILLABLE_STRING_ARRAY: + return "[String]" + case FieldKind_STRING_ARRAY: + return "[String!]" + default: + return fmt.Sprint(uint8(f)) + } +} + // Note: These values are serialized and persisted in the database, avoid modifying existing values. const ( FieldKind_None FieldKind = 0 @@ -161,9 +199,9 @@ var FieldKindStringToEnumMapping = map[string]FieldKind{ "Boolean": FieldKind_BOOL, "[Boolean]": FieldKind_NILLABLE_BOOL_ARRAY, "[Boolean!]": FieldKind_BOOL_ARRAY, - "Integer": FieldKind_INT, - "[Integer]": FieldKind_NILLABLE_INT_ARRAY, - "[Integer!]": FieldKind_INT_ARRAY, + "Int": FieldKind_INT, + "[Int]": FieldKind_NILLABLE_INT_ARRAY, + "[Int!]": FieldKind_INT_ARRAY, "DateTime": FieldKind_DATETIME, "Float": FieldKind_FLOAT, "[Float]": FieldKind_NILLABLE_FLOAT_ARRAY, @@ -233,6 +271,11 @@ type FieldDescription struct { RelationType RelationType } +// IsInternal returns true if this field is internally generated. +func (f FieldDescription) IsInternal() bool { + return (f.Name == "_key") || f.RelationType&Relation_Type_INTERNAL_ID != 0 +} + // IsObject returns true if this field is an object type. 
func (f FieldDescription) IsObject() bool { return (f.Kind == FieldKind_FOREIGN_OBJECT) || diff --git a/client/document.go b/client/document.go index 11d432d1fa..5c8fd9441d 100644 --- a/client/document.go +++ b/client/document.go @@ -17,7 +17,9 @@ import ( "github.com/fxamacker/cbor/v2" "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" + + "github.com/sourcenetwork/defradb/client/request" + ccid "github.com/sourcenetwork/defradb/core/cid" ) // This is the main implementation starting point for accessing the internal Document API @@ -51,11 +53,17 @@ import ( // @body: A document interface can be implemented by both a TypedDocument and a // UnTypedDocument, which use a schema and schemaless approach respectively. type Document struct { - key DocKey - fields map[string]Field - values map[Field]Value - head cid.Cid - mu sync.RWMutex + key DocKey + // SchemaVersionID holds the id of the schema version that this document is + // currently at. + // + // Migrating the document will update this value to the output version of the + // migration. + SchemaVersionID string + fields map[string]Field + values map[Field]Value + head cid.Cid + mu sync.RWMutex // marks if document has unsaved changes isDirty bool } @@ -100,26 +108,12 @@ func NewDocFromMap(data map[string]any) (*Document, error) { return nil, err } - // if no key was specified, then we assume it doesn't exist and we generate it. + // if no key was specified, then we assume it doesn't exist and we generate and set it. if !hasKey { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - - buf, err := doc.Bytes() - if err != nil { - return nil, err - } - - // And then feed it some data - c, err := pref.Sum(buf) + err = doc.generateAndSetDocKey() if err != nil { return nil, err } - doc.key = NewDocKeyV0(c) } return doc, nil @@ -251,11 +245,6 @@ func (doc *Document) Delete(fields ...string) error { return nil } -// SetAsType Sets the value of a field along with a specific type -// func (doc *Document) SetAsType(t client.CType, field string, value any) error { -// return doc.set(t, field, value) -// } - func (doc *Document) set(t CType, field string, value Value) error { doc.mu.Lock() defer doc.mu.Unlock() @@ -473,6 +462,74 @@ func (doc *Document) toMapWithKey() (map[string]any, error) { return docMap, nil } +// GenerateDocKey generates docKey/docID corresponding to the document. +func (doc *Document) GenerateDocKey() (DocKey, error) { + bytes, err := doc.Bytes() + if err != nil { + return DocKey{}, err + } + + cid, err := ccid.NewSHA256CidV1(bytes) + if err != nil { + return DocKey{}, err + } + + return NewDocKeyV0(cid), nil +} + +// setDocKey sets the `doc.key` (should NOT be public). +func (doc *Document) setDocKey(docID DocKey) { + doc.mu.Lock() + defer doc.mu.Unlock() + + doc.key = docID +} + +// generateAndSetDocKey generates the docKey/docID and then (re)sets `doc.key`.
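The key helpers in this file make the content-addressed nature of document keys explicit: the key is a CIDv1 (SHA2-256) over the serialized document, so identical content always yields the identical key. A minimal sketch using the exported surface shown here; the field values are hypothetical, and DocKey.String() is assumed to render the key.

    package dockeydemo

    import (
        "fmt"

        "github.com/sourcenetwork/defradb/client"
    )

    func printDocKey() error {
        doc, err := client.NewDocFromMap(map[string]any{
            "name": "John", // hypothetical fields
            "age":  26,
        })
        if err != nil {
            return err
        }
        // NewDocFromMap already generated and set the key; recomputing it
        // returns the same value because the content is unchanged.
        key, err := doc.GenerateDocKey()
        if err != nil {
            return err
        }
        fmt.Println(key.String())
        return nil
    }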
+func (doc *Document) generateAndSetDocKey() error { + docKey, err := doc.GenerateDocKey() + if err != nil { + return err + } + + doc.setDocKey(docKey) + return nil +} + +func (doc *Document) remapAliasFields(fieldDescriptions []FieldDescription) (bool, error) { + doc.mu.Lock() + defer doc.mu.Unlock() + + foundAlias := false + for docField, docFieldValue := range doc.fields { + for _, fieldDescription := range fieldDescriptions { + maybeAliasField := docField + request.RelatedObjectID + if fieldDescription.Name == maybeAliasField { + foundAlias = true + doc.fields[maybeAliasField] = docFieldValue + delete(doc.fields, docField) + } + } + } + + return foundAlias, nil +} + +// RemapAliasFieldsAndDockey remaps the alias fields and fixes (overwrites) the dockey. +func (doc *Document) RemapAliasFieldsAndDockey(fieldDescriptions []FieldDescription) error { + foundAlias, err := doc.remapAliasFields(fieldDescriptions) + if err != nil { + return err + } + + if !foundAlias { + return nil + } + + // Update the dockey so dockey isn't based on an aliased name of a field. + return doc.generateAndSetDocKey() +} + // DocumentStatus represent the state of the document in the DAG store. // It can either be `Active“ or `Deleted`. type DocumentStatus uint8 diff --git a/client/document_test.go b/client/document_test.go index e49f238f48..c2e9b406c0 100644 --- a/client/document_test.go +++ b/client/document_test.go @@ -13,9 +13,9 @@ package client import ( "testing" - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/assert" + + ccid "github.com/sourcenetwork/defradb/core/cid" ) var ( @@ -28,12 +28,7 @@ var ( } }`) - pref = cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } + pref = ccid.NewDefaultSHA256PrefixV1() ) func TestNewFromJSON(t *testing.T) { diff --git a/client/errors.go b/client/errors.go index 5e55c22710..035ac87235 100644 --- a/client/errors.go +++ b/client/errors.go @@ -17,12 +17,11 @@ import ( ) const ( - errFieldNotExist string = "The given field does not exist" - errSelectOfNonGroupField string = "cannot select a non-group-by field at group-level" - errUnexpectedType string = "unexpected type" - errParsingFailed string = "failed to parse argument" - errUninitializeProperty string = "invalid state, required property is uninitialized" - errMaxTxnRetries string = "reached maximum transaction reties" + errFieldNotExist string = "The given field does not exist" + errUnexpectedType string = "unexpected type" + errParsingFailed string = "failed to parse argument" + errUninitializeProperty string = "invalid state, required property is uninitialized" + errMaxTxnRetries string = "reached maximum transaction retries" ) // Errors returnable from this package. // // This list is incomplete and undefined errors may also be returned. // Errors returned from this package may be tested against these errors with errors.Is.
var ( - ErrFieldNotExist = errors.New(errFieldNotExist) - ErrSelectOfNonGroupField = errors.New(errSelectOfNonGroupField) - ErrUnexpectedType = errors.New(errUnexpectedType) - ErrParsingFailed = errors.New(errParsingFailed) - ErrUninitializeProperty = errors.New(errUninitializeProperty) - ErrFieldNotObject = errors.New("trying to access field on a non object type") - ErrValueTypeMismatch = errors.New("value does not match indicated type") - ErrIndexNotFound = errors.New("no index found for given ID") - ErrDocumentNotFound = errors.New("no document for the given key exists") - ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type") - ErrInvalidUpdater = errors.New("the updater of a document is of invalid type") - ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type") - ErrMalformedDocKey = errors.New("malformed DocKey, missing either version or cid") - ErrInvalidDocKeyVersion = errors.New("invalid DocKey version") - ErrMaxTxnRetries = errors.New(errMaxTxnRetries) + ErrFieldNotExist = errors.New(errFieldNotExist) + ErrUnexpectedType = errors.New(errUnexpectedType) + ErrParsingFailed = errors.New(errParsingFailed) + ErrUninitializeProperty = errors.New(errUninitializeProperty) + ErrFieldNotObject = errors.New("trying to access field on a non object type") + ErrValueTypeMismatch = errors.New("value does not match indicated type") + ErrIndexNotFound = errors.New("no index found for given ID") + ErrDocumentNotFound = errors.New("no document for the given key exists") + ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type") + ErrInvalidUpdater = errors.New("the updater of a document is of invalid type") + ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type") + ErrMalformedDocKey = errors.New("malformed DocKey, missing either version or cid") + ErrInvalidDocKeyVersion = errors.New("invalid DocKey version") + ErrMaxTxnRetries = errors.New(errMaxTxnRetries) ) // NewErrFieldNotExist returns an error indicating that the given field does not exist. @@ -58,12 +56,6 @@ func NewErrFieldIndexNotExist(index int) error { return errors.New(errFieldNotExist, errors.NewKV("Index", index)) } -// NewErrSelectOfNonGroupField returns an error indicating that a non-group-by field -// was selected at group-level. -func NewErrSelectOfNonGroupField(name string) error { - return errors.New(errSelectOfNonGroupField, errors.NewKV("Field", name)) -} - // NewErrUnexpectedType returns an error indicating that the given value is of an unexpected type. func NewErrUnexpectedType[TExpected any](property string, actual any) error { var expected TExpected diff --git a/client/index.go b/client/index.go new file mode 100644 index 0000000000..47b52f00c5 --- /dev/null +++ b/client/index.go @@ -0,0 +1,39 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +// IndexDirection is the direction of an index. 
+type IndexDirection string + +const ( + // Ascending is the value to use for ascending fields + Ascending IndexDirection = "ASC" + // Descending is the value to use for descending fields + Descending IndexDirection = "DESC" +) + +// IndexedFieldDescription describes how a field is being indexed. +type IndexedFieldDescription struct { + // Name contains the name of the field. + Name string + // Direction contains the direction of the index. + Direction IndexDirection +} + +// IndexDescription describes an index. +type IndexDescription struct { + // Name contains the name of the index. + Name string + // ID is the local identifier of this index. + ID uint32 + // Fields contains the fields that are being indexed. + Fields []IndexedFieldDescription +} diff --git a/client/lens.go b/client/lens.go new file mode 100644 index 0000000000..1cffa19248 --- /dev/null +++ b/client/lens.go @@ -0,0 +1,83 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +import ( + "context" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/datastore" +) + +// LensConfig represents the configuration of a Lens migration in Defra. +type LensConfig struct { + // SourceSchemaVersionID is the ID of the schema version to migrate + // from. + // + // The source and destination versions must be next to each other in the history. + SourceSchemaVersionID string + + // DestinationSchemaVersionID is the ID of the schema version to migrate + // to. + // + // The source and destination versions must be next to each other in the history. + DestinationSchemaVersionID string + + // The configuration of the Lens module. + // + // For now, the wasm module must remain at the location specified as long as the + // migration is active. + model.Lens +} + +// LensRegistry exposes several useful thread-safe migration related functions which may +// be used to manage migrations. +type LensRegistry interface { + // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to + // calling `Store.SetMigration(ctx, cfg)`. + // + // There may only be one migration per schema version id. If another migration was registered it will be + // overwritten by this migration. + // + // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. + // This is to allow the migration of documents of schema versions unknown to the local node received by the + // P2P system. + // + // Migrations will only run if there is a complete path from the document schema version to the latest local + // schema version. + SetMigration(context.Context, datastore.Txn, LensConfig) error + + // ReloadLenses clears any cached migrations, loads their configurations from the database and re-initializes + // them. It is run on database start if the database already existed.
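These index descriptions pair with the new Collection.CreateIndex and GetIndexes methods from client/collection.go earlier in this changeset. A minimal sketch, assuming a client.Collection handle obtained via GetCollectionByName; the index and field names are hypothetical.

    package indexdemo

    import (
        "context"
        "fmt"

        "github.com/sourcenetwork/defradb/client"
    )

    func createNameIndex(ctx context.Context, col client.Collection) error {
        desc, err := col.CreateIndex(ctx, client.IndexDescription{
            // Name is optional; when omitted the node generates one.
            Name: "user_name_idx",
            Fields: []client.IndexedFieldDescription{
                {Name: "name", Direction: client.Ascending},
            },
        })
        if err != nil {
            return err
        }

        indexes, err := col.GetIndexes(ctx)
        if err != nil {
            return err
        }
        fmt.Printf("created %q, collection now has %d index(es)\n", desc.Name, len(indexes))
        return nil
    }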
+ ReloadLenses(ctx context.Context, txn datastore.Txn) error + + // MigrateUp returns an enumerable that feeds the given source through the Lens migration for the given + // schema version id, if one is found; if there is no matching migration, the given source will be returned. + MigrateUp(enumerable.Enumerable[map[string]any], string) (enumerable.Enumerable[map[string]any], error) + + // MigrateDown returns an enumerable that feeds the given source through the Lens migration for the schema + // version that precedes the given schema version id in reverse, if one is found; if there is no matching + // migration, the given source will be returned. + // + // This downgrades any documents in the source enumerable if/when enumerated. + MigrateDown(enumerable.Enumerable[map[string]any], string) (enumerable.Enumerable[map[string]any], error) + + // Config returns a slice of the configurations of the currently loaded migrations. + // + // Modifying the slice does not affect the loaded configurations. + Config() []LensConfig + + // HasMigration returns true if there is a migration registered for the given schema version id, otherwise + // it will return false. + HasMigration(string) bool +} diff --git a/client/mocks/Collection.go b/client/mocks/Collection.go new file mode 100644 index 0000000000..a675cd1a17 --- /dev/null +++ b/client/mocks/Collection.go @@ -0,0 +1,1271 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "github.com/sourcenetwork/defradb/client" + + datastore "github.com/sourcenetwork/defradb/datastore" + + mock "github.com/stretchr/testify/mock" +) + +// Collection is an autogenerated mock type for the Collection type +type Collection struct { + mock.Mock +} + +type Collection_Expecter struct { + mock *mock.Mock +} + +func (_m *Collection) EXPECT() *Collection_Expecter { + return &Collection_Expecter{mock: &_m.Mock} +} + +// Create provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Create(_a0 context.Context, _a1 *client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create' +type Collection_Create_Call struct { + *mock.Call +} + +// Create is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *client.Document +func (_e *Collection_Expecter) Create(_a0 interface{}, _a1 interface{}) *Collection_Create_Call { + return &Collection_Create_Call{Call: _e.mock.On("Create", _a0, _a1)} +} + +func (_c *Collection_Create_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Create_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.Document)) + }) + return _c +} + +func (_c *Collection_Create_Call) Return(_a0 error) *Collection_Create_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Create_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_Create_Call { + _c.Call.Return(run) + return _c +} + +// CreateIndex provides a mock function with given fields: _a0, _a1 +func (_m *Collection) CreateIndex(_a0 context.Context, _a1 client.IndexDescription) (client.IndexDescription, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.IndexDescription + var r1 error + if rf, ok :=
ret.Get(0).(func(context.Context, client.IndexDescription) (client.IndexDescription, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.IndexDescription) client.IndexDescription); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(client.IndexDescription) + } + + if rf, ok := ret.Get(1).(func(context.Context, client.IndexDescription) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_CreateIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateIndex' +type Collection_CreateIndex_Call struct { + *mock.Call +} + +// CreateIndex is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.IndexDescription +func (_e *Collection_Expecter) CreateIndex(_a0 interface{}, _a1 interface{}) *Collection_CreateIndex_Call { + return &Collection_CreateIndex_Call{Call: _e.mock.On("CreateIndex", _a0, _a1)} +} + +func (_c *Collection_CreateIndex_Call) Run(run func(_a0 context.Context, _a1 client.IndexDescription)) *Collection_CreateIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.IndexDescription)) + }) + return _c +} + +func (_c *Collection_CreateIndex_Call) Return(_a0 client.IndexDescription, _a1 error) *Collection_CreateIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_CreateIndex_Call) RunAndReturn(run func(context.Context, client.IndexDescription) (client.IndexDescription, error)) *Collection_CreateIndex_Call { + _c.Call.Return(run) + return _c +} + +// CreateMany provides a mock function with given fields: _a0, _a1 +func (_m *Collection) CreateMany(_a0 context.Context, _a1 []*client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_CreateMany_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateMany' +type Collection_CreateMany_Call struct { + *mock.Call +} + +// CreateMany is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []*client.Document +func (_e *Collection_Expecter) CreateMany(_a0 interface{}, _a1 interface{}) *Collection_CreateMany_Call { + return &Collection_CreateMany_Call{Call: _e.mock.On("CreateMany", _a0, _a1)} +} + +func (_c *Collection_CreateMany_Call) Run(run func(_a0 context.Context, _a1 []*client.Document)) *Collection_CreateMany_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]*client.Document)) + }) + return _c +} + +func (_c *Collection_CreateMany_Call) Return(_a0 error) *Collection_CreateMany_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_CreateMany_Call) RunAndReturn(run func(context.Context, []*client.Document) error) *Collection_CreateMany_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Delete(_a0 context.Context, _a1 client.DocKey) (bool, error) { + ret := _m.Called(_a0, _a1) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) (bool, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey) error); 
ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type Collection_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.DocKey +func (_e *Collection_Expecter) Delete(_a0 interface{}, _a1 interface{}) *Collection_Delete_Call { + return &Collection_Delete_Call{Call: _e.mock.On("Delete", _a0, _a1)} +} + +func (_c *Collection_Delete_Call) Run(run func(_a0 context.Context, _a1 client.DocKey)) *Collection_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey)) + }) + return _c +} + +func (_c *Collection_Delete_Call) Return(_a0 bool, _a1 error) *Collection_Delete_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_Delete_Call) RunAndReturn(run func(context.Context, client.DocKey) (bool, error)) *Collection_Delete_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWith provides a mock function with given fields: ctx, target +func (_m *Collection) DeleteWith(ctx context.Context, target interface{}) (*client.DeleteResult, error) { + ret := _m.Called(ctx, target) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok { + return rf(ctx, target) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) *client.DeleteResult); ok { + r0 = rf(ctx, target) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { + r1 = rf(ctx, target) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWith_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWith' +type Collection_DeleteWith_Call struct { + *mock.Call +} + +// DeleteWith is a helper method to define mock.On call +// - ctx context.Context +// - target interface{} +func (_e *Collection_Expecter) DeleteWith(ctx interface{}, target interface{}) *Collection_DeleteWith_Call { + return &Collection_DeleteWith_Call{Call: _e.mock.On("DeleteWith", ctx, target)} +} + +func (_c *Collection_DeleteWith_Call) Run(run func(ctx context.Context, target interface{})) *Collection_DeleteWith_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{})) + }) + return _c +} + +func (_c *Collection_DeleteWith_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWith_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWith_Call) RunAndReturn(run func(context.Context, interface{}) (*client.DeleteResult, error)) *Collection_DeleteWith_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWithFilter provides a mock function with given fields: ctx, filter +func (_m *Collection) DeleteWithFilter(ctx context.Context, filter interface{}) (*client.DeleteResult, error) { + ret := _m.Called(ctx, filter) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok { + return rf(ctx, filter) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) *client.DeleteResult); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
interface{}) error); ok { + r1 = rf(ctx, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWithFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithFilter' +type Collection_DeleteWithFilter_Call struct { + *mock.Call +} + +// DeleteWithFilter is a helper method to define mock.On call +// - ctx context.Context +// - filter interface{} +func (_e *Collection_Expecter) DeleteWithFilter(ctx interface{}, filter interface{}) *Collection_DeleteWithFilter_Call { + return &Collection_DeleteWithFilter_Call{Call: _e.mock.On("DeleteWithFilter", ctx, filter)} +} + +func (_c *Collection_DeleteWithFilter_Call) Run(run func(ctx context.Context, filter interface{})) *Collection_DeleteWithFilter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{})) + }) + return _c +} + +func (_c *Collection_DeleteWithFilter_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithFilter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWithFilter_Call) RunAndReturn(run func(context.Context, interface{}) (*client.DeleteResult, error)) *Collection_DeleteWithFilter_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWithKey provides a mock function with given fields: _a0, _a1 +func (_m *Collection) DeleteWithKey(_a0 context.Context, _a1 client.DocKey) (*client.DeleteResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) (*client.DeleteResult, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) *client.DeleteResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWithKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithKey' +type Collection_DeleteWithKey_Call struct { + *mock.Call +} + +// DeleteWithKey is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.DocKey +func (_e *Collection_Expecter) DeleteWithKey(_a0 interface{}, _a1 interface{}) *Collection_DeleteWithKey_Call { + return &Collection_DeleteWithKey_Call{Call: _e.mock.On("DeleteWithKey", _a0, _a1)} +} + +func (_c *Collection_DeleteWithKey_Call) Run(run func(_a0 context.Context, _a1 client.DocKey)) *Collection_DeleteWithKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey)) + }) + return _c +} + +func (_c *Collection_DeleteWithKey_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithKey_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWithKey_Call) RunAndReturn(run func(context.Context, client.DocKey) (*client.DeleteResult, error)) *Collection_DeleteWithKey_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWithKeys provides a mock function with given fields: _a0, _a1 +func (_m *Collection) DeleteWithKeys(_a0 context.Context, _a1 []client.DocKey) (*client.DeleteResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []client.DocKey) (*client.DeleteResult, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := 
ret.Get(0).(func(context.Context, []client.DocKey) *client.DeleteResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []client.DocKey) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWithKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithKeys' +type Collection_DeleteWithKeys_Call struct { + *mock.Call +} + +// DeleteWithKeys is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []client.DocKey +func (_e *Collection_Expecter) DeleteWithKeys(_a0 interface{}, _a1 interface{}) *Collection_DeleteWithKeys_Call { + return &Collection_DeleteWithKeys_Call{Call: _e.mock.On("DeleteWithKeys", _a0, _a1)} +} + +func (_c *Collection_DeleteWithKeys_Call) Run(run func(_a0 context.Context, _a1 []client.DocKey)) *Collection_DeleteWithKeys_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]client.DocKey)) + }) + return _c +} + +func (_c *Collection_DeleteWithKeys_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithKeys_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWithKeys_Call) RunAndReturn(run func(context.Context, []client.DocKey) (*client.DeleteResult, error)) *Collection_DeleteWithKeys_Call { + _c.Call.Return(run) + return _c +} + +// Description provides a mock function with given fields: +func (_m *Collection) Description() client.CollectionDescription { + ret := _m.Called() + + var r0 client.CollectionDescription + if rf, ok := ret.Get(0).(func() client.CollectionDescription); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.CollectionDescription) + } + + return r0 +} + +// Collection_Description_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Description' +type Collection_Description_Call struct { + *mock.Call +} + +// Description is a helper method to define mock.On call +func (_e *Collection_Expecter) Description() *Collection_Description_Call { + return &Collection_Description_Call{Call: _e.mock.On("Description")} +} + +func (_c *Collection_Description_Call) Run(run func()) *Collection_Description_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Description_Call) Return(_a0 client.CollectionDescription) *Collection_Description_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Description_Call) RunAndReturn(run func() client.CollectionDescription) *Collection_Description_Call { + _c.Call.Return(run) + return _c +} + +// DropIndex provides a mock function with given fields: ctx, indexName +func (_m *Collection) DropIndex(ctx context.Context, indexName string) error { + ret := _m.Called(ctx, indexName) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, indexName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex' +type Collection_DropIndex_Call struct { + *mock.Call +} + +// DropIndex is a helper method to define mock.On call +// - ctx context.Context +// - indexName string +func (_e *Collection_Expecter) DropIndex(ctx interface{}, indexName interface{}) *Collection_DropIndex_Call { + return &Collection_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, 
indexName)} +} + +func (_c *Collection_DropIndex_Call) Run(run func(ctx context.Context, indexName string)) *Collection_DropIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *Collection_DropIndex_Call) Return(_a0 error) *Collection_DropIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_DropIndex_Call) RunAndReturn(run func(context.Context, string) error) *Collection_DropIndex_Call { + _c.Call.Return(run) + return _c +} + +// Exists provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Exists(_a0 context.Context, _a1 client.DocKey) (bool, error) { + ret := _m.Called(_a0, _a1) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) (bool, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type Collection_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.DocKey +func (_e *Collection_Expecter) Exists(_a0 interface{}, _a1 interface{}) *Collection_Exists_Call { + return &Collection_Exists_Call{Call: _e.mock.On("Exists", _a0, _a1)} +} + +func (_c *Collection_Exists_Call) Run(run func(_a0 context.Context, _a1 client.DocKey)) *Collection_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey)) + }) + return _c +} + +func (_c *Collection_Exists_Call) Return(_a0 bool, _a1 error) *Collection_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_Exists_Call) RunAndReturn(run func(context.Context, client.DocKey) (bool, error)) *Collection_Exists_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, key, showDeleted +func (_m *Collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) { + ret := _m.Called(ctx, key, showDeleted) + + var r0 *client.Document + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, bool) (*client.Document, error)); ok { + return rf(ctx, key, showDeleted) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, bool) *client.Document); ok { + r0 = rf(ctx, key, showDeleted) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.Document) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey, bool) error); ok { + r1 = rf(ctx, key, showDeleted) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type Collection_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - key client.DocKey +// - showDeleted bool +func (_e *Collection_Expecter) Get(ctx interface{}, key interface{}, showDeleted interface{}) *Collection_Get_Call { + return &Collection_Get_Call{Call: _e.mock.On("Get", ctx, key, showDeleted)} +} + +func (_c *Collection_Get_Call) Run(run func(ctx context.Context, key client.DocKey, showDeleted bool)) 
*Collection_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey), args[2].(bool)) + }) + return _c +} + +func (_c *Collection_Get_Call) Return(_a0 *client.Document, _a1 error) *Collection_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_Get_Call) RunAndReturn(run func(context.Context, client.DocKey, bool) (*client.Document, error)) *Collection_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetAllDocKeys provides a mock function with given fields: ctx +func (_m *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { + ret := _m.Called(ctx) + + var r0 <-chan client.DocKeysResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan client.DocKeysResult, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan client.DocKeysResult); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan client.DocKeysResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_GetAllDocKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllDocKeys' +type Collection_GetAllDocKeys_Call struct { + *mock.Call +} + +// GetAllDocKeys is a helper method to define mock.On call +// - ctx context.Context +func (_e *Collection_Expecter) GetAllDocKeys(ctx interface{}) *Collection_GetAllDocKeys_Call { + return &Collection_GetAllDocKeys_Call{Call: _e.mock.On("GetAllDocKeys", ctx)} +} + +func (_c *Collection_GetAllDocKeys_Call) Run(run func(ctx context.Context)) *Collection_GetAllDocKeys_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Collection_GetAllDocKeys_Call) Return(_a0 <-chan client.DocKeysResult, _a1 error) *Collection_GetAllDocKeys_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_GetAllDocKeys_Call) RunAndReturn(run func(context.Context) (<-chan client.DocKeysResult, error)) *Collection_GetAllDocKeys_Call { + _c.Call.Return(run) + return _c +} + +// GetIndexes provides a mock function with given fields: ctx +func (_m *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { + ret := _m.Called(ctx) + + var r0 []client.IndexDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.IndexDescription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.IndexDescription); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.IndexDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_GetIndexes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetIndexes' +type Collection_GetIndexes_Call struct { + *mock.Call +} + +// GetIndexes is a helper method to define mock.On call +// - ctx context.Context +func (_e *Collection_Expecter) GetIndexes(ctx interface{}) *Collection_GetIndexes_Call { + return &Collection_GetIndexes_Call{Call: _e.mock.On("GetIndexes", ctx)} +} + +func (_c *Collection_GetIndexes_Call) Run(run func(ctx context.Context)) *Collection_GetIndexes_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Collection_GetIndexes_Call) 
Return(_a0 []client.IndexDescription, _a1 error) *Collection_GetIndexes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_GetIndexes_Call) RunAndReturn(run func(context.Context) ([]client.IndexDescription, error)) *Collection_GetIndexes_Call { + _c.Call.Return(run) + return _c +} + +// ID provides a mock function with given fields: +func (_m *Collection) ID() uint32 { + ret := _m.Called() + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// Collection_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID' +type Collection_ID_Call struct { + *mock.Call +} + +// ID is a helper method to define mock.On call +func (_e *Collection_Expecter) ID() *Collection_ID_Call { + return &Collection_ID_Call{Call: _e.mock.On("ID")} +} + +func (_c *Collection_ID_Call) Run(run func()) *Collection_ID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_ID_Call) Return(_a0 uint32) *Collection_ID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_ID_Call) RunAndReturn(run func() uint32) *Collection_ID_Call { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with given fields: +func (_m *Collection) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Collection_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type Collection_Name_Call struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *Collection_Expecter) Name() *Collection_Name_Call { + return &Collection_Name_Call{Call: _e.mock.On("Name")} +} + +func (_c *Collection_Name_Call) Run(run func()) *Collection_Name_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Name_Call) Return(_a0 string) *Collection_Name_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Name_Call) RunAndReturn(run func() string) *Collection_Name_Call { + _c.Call.Return(run) + return _c +} + +// Save provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Save(_a0 context.Context, _a1 *client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_Save_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Save' +type Collection_Save_Call struct { + *mock.Call +} + +// Save is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *client.Document +func (_e *Collection_Expecter) Save(_a0 interface{}, _a1 interface{}) *Collection_Save_Call { + return &Collection_Save_Call{Call: _e.mock.On("Save", _a0, _a1)} +} + +func (_c *Collection_Save_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Save_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.Document)) + }) + return _c +} + +func (_c *Collection_Save_Call) Return(_a0 error) *Collection_Save_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Save_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_Save_Call { + _c.Call.Return(run) + return _c +} + +// 
Schema provides a mock function with given fields: +func (_m *Collection) Schema() client.SchemaDescription { + ret := _m.Called() + + var r0 client.SchemaDescription + if rf, ok := ret.Get(0).(func() client.SchemaDescription); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.SchemaDescription) + } + + return r0 +} + +// Collection_Schema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Schema' +type Collection_Schema_Call struct { + *mock.Call +} + +// Schema is a helper method to define mock.On call +func (_e *Collection_Expecter) Schema() *Collection_Schema_Call { + return &Collection_Schema_Call{Call: _e.mock.On("Schema")} +} + +func (_c *Collection_Schema_Call) Run(run func()) *Collection_Schema_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Schema_Call) Return(_a0 client.SchemaDescription) *Collection_Schema_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Schema_Call) RunAndReturn(run func() client.SchemaDescription) *Collection_Schema_Call { + _c.Call.Return(run) + return _c +} + +// SchemaID provides a mock function with given fields: +func (_m *Collection) SchemaID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Collection_SchemaID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SchemaID' +type Collection_SchemaID_Call struct { + *mock.Call +} + +// SchemaID is a helper method to define mock.On call +func (_e *Collection_Expecter) SchemaID() *Collection_SchemaID_Call { + return &Collection_SchemaID_Call{Call: _e.mock.On("SchemaID")} +} + +func (_c *Collection_SchemaID_Call) Run(run func()) *Collection_SchemaID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_SchemaID_Call) Return(_a0 string) *Collection_SchemaID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_SchemaID_Call) RunAndReturn(run func() string) *Collection_SchemaID_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Update(_a0 context.Context, _a1 *client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type Collection_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *client.Document +func (_e *Collection_Expecter) Update(_a0 interface{}, _a1 interface{}) *Collection_Update_Call { + return &Collection_Update_Call{Call: _e.mock.On("Update", _a0, _a1)} +} + +func (_c *Collection_Update_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.Document)) + }) + return _c +} + +func (_c *Collection_Update_Call) Return(_a0 error) *Collection_Update_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Update_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_Update_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWith provides a mock function with 
given fields: ctx, target, updater +func (_m *Collection) UpdateWith(ctx context.Context, target interface{}, updater string) (*client.UpdateResult, error) { + ret := _m.Called(ctx, target, updater) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok { + return rf(ctx, target, updater) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) *client.UpdateResult); ok { + r0 = rf(ctx, target, updater) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}, string) error); ok { + r1 = rf(ctx, target, updater) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWith_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWith' +type Collection_UpdateWith_Call struct { + *mock.Call +} + +// UpdateWith is a helper method to define mock.On call +// - ctx context.Context +// - target interface{} +// - updater string +func (_e *Collection_Expecter) UpdateWith(ctx interface{}, target interface{}, updater interface{}) *Collection_UpdateWith_Call { + return &Collection_UpdateWith_Call{Call: _e.mock.On("UpdateWith", ctx, target, updater)} +} + +func (_c *Collection_UpdateWith_Call) Run(run func(ctx context.Context, target interface{}, updater string)) *Collection_UpdateWith_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{}), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWith_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWith_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWith_Call) RunAndReturn(run func(context.Context, interface{}, string) (*client.UpdateResult, error)) *Collection_UpdateWith_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWithFilter provides a mock function with given fields: ctx, filter, updater +func (_m *Collection) UpdateWithFilter(ctx context.Context, filter interface{}, updater string) (*client.UpdateResult, error) { + ret := _m.Called(ctx, filter, updater) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok { + return rf(ctx, filter, updater) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) *client.UpdateResult); ok { + r0 = rf(ctx, filter, updater) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}, string) error); ok { + r1 = rf(ctx, filter, updater) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWithFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithFilter' +type Collection_UpdateWithFilter_Call struct { + *mock.Call +} + +// UpdateWithFilter is a helper method to define mock.On call +// - ctx context.Context +// - filter interface{} +// - updater string +func (_e *Collection_Expecter) UpdateWithFilter(ctx interface{}, filter interface{}, updater interface{}) *Collection_UpdateWithFilter_Call { + return &Collection_UpdateWithFilter_Call{Call: _e.mock.On("UpdateWithFilter", ctx, filter, updater)} +} + +func (_c *Collection_UpdateWithFilter_Call) Run(run func(ctx context.Context, filter interface{}, updater string)) *Collection_UpdateWithFilter_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{}), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWithFilter_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithFilter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWithFilter_Call) RunAndReturn(run func(context.Context, interface{}, string) (*client.UpdateResult, error)) *Collection_UpdateWithFilter_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWithKey provides a mock function with given fields: ctx, key, updater +func (_m *Collection) UpdateWithKey(ctx context.Context, key client.DocKey, updater string) (*client.UpdateResult, error) { + ret := _m.Called(ctx, key, updater) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, string) (*client.UpdateResult, error)); ok { + return rf(ctx, key, updater) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, string) *client.UpdateResult); ok { + r0 = rf(ctx, key, updater) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey, string) error); ok { + r1 = rf(ctx, key, updater) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWithKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithKey' +type Collection_UpdateWithKey_Call struct { + *mock.Call +} + +// UpdateWithKey is a helper method to define mock.On call +// - ctx context.Context +// - key client.DocKey +// - updater string +func (_e *Collection_Expecter) UpdateWithKey(ctx interface{}, key interface{}, updater interface{}) *Collection_UpdateWithKey_Call { + return &Collection_UpdateWithKey_Call{Call: _e.mock.On("UpdateWithKey", ctx, key, updater)} +} + +func (_c *Collection_UpdateWithKey_Call) Run(run func(ctx context.Context, key client.DocKey, updater string)) *Collection_UpdateWithKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWithKey_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithKey_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWithKey_Call) RunAndReturn(run func(context.Context, client.DocKey, string) (*client.UpdateResult, error)) *Collection_UpdateWithKey_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWithKeys provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Collection) UpdateWithKeys(_a0 context.Context, _a1 []client.DocKey, _a2 string) (*client.UpdateResult, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []client.DocKey, string) (*client.UpdateResult, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, []client.DocKey, string) *client.UpdateResult); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []client.DocKey, string) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWithKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithKeys' +type Collection_UpdateWithKeys_Call struct { + 
*mock.Call +} + +// UpdateWithKeys is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []client.DocKey +// - _a2 string +func (_e *Collection_Expecter) UpdateWithKeys(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Collection_UpdateWithKeys_Call { + return &Collection_UpdateWithKeys_Call{Call: _e.mock.On("UpdateWithKeys", _a0, _a1, _a2)} +} + +func (_c *Collection_UpdateWithKeys_Call) Run(run func(_a0 context.Context, _a1 []client.DocKey, _a2 string)) *Collection_UpdateWithKeys_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]client.DocKey), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWithKeys_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithKeys_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWithKeys_Call) RunAndReturn(run func(context.Context, []client.DocKey, string) (*client.UpdateResult, error)) *Collection_UpdateWithKeys_Call { + _c.Call.Return(run) + return _c +} + +// WithTxn provides a mock function with given fields: _a0 +func (_m *Collection) WithTxn(_a0 datastore.Txn) client.Collection { + ret := _m.Called(_a0) + + var r0 client.Collection + if rf, ok := ret.Get(0).(func(datastore.Txn) client.Collection); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + return r0 +} + +// Collection_WithTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithTxn' +type Collection_WithTxn_Call struct { + *mock.Call +} + +// WithTxn is a helper method to define mock.On call +// - _a0 datastore.Txn +func (_e *Collection_Expecter) WithTxn(_a0 interface{}) *Collection_WithTxn_Call { + return &Collection_WithTxn_Call{Call: _e.mock.On("WithTxn", _a0)} +} + +func (_c *Collection_WithTxn_Call) Run(run func(_a0 datastore.Txn)) *Collection_WithTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(datastore.Txn)) + }) + return _c +} + +func (_c *Collection_WithTxn_Call) Return(_a0 client.Collection) *Collection_WithTxn_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_WithTxn_Call) RunAndReturn(run func(datastore.Txn) client.Collection) *Collection_WithTxn_Call { + _c.Call.Return(run) + return _c +} + +// NewCollection creates a new instance of Collection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCollection(t interface { + mock.TestingT + Cleanup(func()) +}) *Collection { + mock := &Collection{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/client/mocks/DB.go b/client/mocks/DB.go new file mode 100644 index 0000000000..82d53291da --- /dev/null +++ b/client/mocks/DB.go @@ -0,0 +1,1308 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
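(Usage note, not part of the diff.) The generated Collection mock above is driven through its expecter: `EXPECT()` returns a `Collection_Expecter`, whose methods record type-checked expectations via `mock.On`. Below is a minimal sketch of a test exercising it; it assumes the mock package is importable as `github.com/sourcenetwork/defradb/client/mocks`, that `Collection` exposes the generated `EXPECT()` method defined earlier in the file, and that the zero value of `client.DocKey` is an acceptable placeholder argument.

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/sourcenetwork/defradb/client"
	"github.com/sourcenetwork/defradb/client/mocks"
)

func TestCollectionDelete(t *testing.T) {
	// NewCollection registers a cleanup that asserts all expectations on t.
	col := mocks.NewCollection(t)

	// Fixed return values: Delete reports success for any context and key.
	col.EXPECT().
		Delete(mock.Anything, mock.Anything).
		Return(true, nil).
		Once()

	deleted, err := col.Delete(context.Background(), client.DocKey{})
	require.NoError(t, err)
	require.True(t, deleted)
}

`Return` fixes the outputs up front; the generated `RunAndReturn` variant instead accepts a function with the concrete signature, so the stubbed result can depend on the arguments.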
+ +package mocks + +import ( + blockstore "github.com/ipfs/boxo/blockstore" + client "github.com/sourcenetwork/defradb/client" + + context "context" + + datastore "github.com/sourcenetwork/defradb/datastore" + + events "github.com/sourcenetwork/defradb/events" + + mock "github.com/stretchr/testify/mock" +) + +// DB is an autogenerated mock type for the DB type +type DB struct { + mock.Mock +} + +type DB_Expecter struct { + mock *mock.Mock +} + +func (_m *DB) EXPECT() *DB_Expecter { + return &DB_Expecter{mock: &_m.Mock} +} + +// AddP2PCollection provides a mock function with given fields: ctx, collectionID +func (_m *DB) AddP2PCollection(ctx context.Context, collectionID string) error { + ret := _m.Called(ctx, collectionID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, collectionID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_AddP2PCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddP2PCollection' +type DB_AddP2PCollection_Call struct { + *mock.Call +} + +// AddP2PCollection is a helper method to define mock.On call +// - ctx context.Context +// - collectionID string +func (_e *DB_Expecter) AddP2PCollection(ctx interface{}, collectionID interface{}) *DB_AddP2PCollection_Call { + return &DB_AddP2PCollection_Call{Call: _e.mock.On("AddP2PCollection", ctx, collectionID)} +} + +func (_c *DB_AddP2PCollection_Call) Run(run func(ctx context.Context, collectionID string)) *DB_AddP2PCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_AddP2PCollection_Call) Return(_a0 error) *DB_AddP2PCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_AddP2PCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_AddP2PCollection_Call { + _c.Call.Return(run) + return _c +} + +// AddSchema provides a mock function with given fields: _a0, _a1 +func (_m *DB) AddSchema(_a0 context.Context, _a1 string) ([]client.CollectionDescription, error) { + ret := _m.Called(_a0, _a1) + + var r0 []client.CollectionDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.CollectionDescription, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []client.CollectionDescription); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.CollectionDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_AddSchema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddSchema' +type DB_AddSchema_Call struct { + *mock.Call +} + +// AddSchema is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) AddSchema(_a0 interface{}, _a1 interface{}) *DB_AddSchema_Call { + return &DB_AddSchema_Call{Call: _e.mock.On("AddSchema", _a0, _a1)} +} + +func (_c *DB_AddSchema_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_AddSchema_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_AddSchema_Call) Return(_a0 []client.CollectionDescription, _a1 error) *DB_AddSchema_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_AddSchema_Call) RunAndReturn(run func(context.Context, string) 
([]client.CollectionDescription, error)) *DB_AddSchema_Call { + _c.Call.Return(run) + return _c +} + +// BasicExport provides a mock function with given fields: ctx, config +func (_m *DB) BasicExport(ctx context.Context, config *client.BackupConfig) error { + ret := _m.Called(ctx, config) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.BackupConfig) error); ok { + r0 = rf(ctx, config) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_BasicExport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BasicExport' +type DB_BasicExport_Call struct { + *mock.Call +} + +// BasicExport is a helper method to define mock.On call +// - ctx context.Context +// - config *client.BackupConfig +func (_e *DB_Expecter) BasicExport(ctx interface{}, config interface{}) *DB_BasicExport_Call { + return &DB_BasicExport_Call{Call: _e.mock.On("BasicExport", ctx, config)} +} + +func (_c *DB_BasicExport_Call) Run(run func(ctx context.Context, config *client.BackupConfig)) *DB_BasicExport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.BackupConfig)) + }) + return _c +} + +func (_c *DB_BasicExport_Call) Return(_a0 error) *DB_BasicExport_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_BasicExport_Call) RunAndReturn(run func(context.Context, *client.BackupConfig) error) *DB_BasicExport_Call { + _c.Call.Return(run) + return _c +} + +// BasicImport provides a mock function with given fields: ctx, filepath +func (_m *DB) BasicImport(ctx context.Context, filepath string) error { + ret := _m.Called(ctx, filepath) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, filepath) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_BasicImport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BasicImport' +type DB_BasicImport_Call struct { + *mock.Call +} + +// BasicImport is a helper method to define mock.On call +// - ctx context.Context +// - filepath string +func (_e *DB_Expecter) BasicImport(ctx interface{}, filepath interface{}) *DB_BasicImport_Call { + return &DB_BasicImport_Call{Call: _e.mock.On("BasicImport", ctx, filepath)} +} + +func (_c *DB_BasicImport_Call) Run(run func(ctx context.Context, filepath string)) *DB_BasicImport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_BasicImport_Call) Return(_a0 error) *DB_BasicImport_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_BasicImport_Call) RunAndReturn(run func(context.Context, string) error) *DB_BasicImport_Call { + _c.Call.Return(run) + return _c +} + +// Blockstore provides a mock function with given fields: +func (_m *DB) Blockstore() blockstore.Blockstore { + ret := _m.Called() + + var r0 blockstore.Blockstore + if rf, ok := ret.Get(0).(func() blockstore.Blockstore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blockstore.Blockstore) + } + } + + return r0 +} + +// DB_Blockstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Blockstore' +type DB_Blockstore_Call struct { + *mock.Call +} + +// Blockstore is a helper method to define mock.On call +func (_e *DB_Expecter) Blockstore() *DB_Blockstore_Call { + return &DB_Blockstore_Call{Call: _e.mock.On("Blockstore")} +} + +func (_c *DB_Blockstore_Call) Run(run func()) *DB_Blockstore_Call { + _c.Call.Run(func(args 
mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Blockstore_Call) Return(_a0 blockstore.Blockstore) *DB_Blockstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Blockstore_Call) RunAndReturn(run func() blockstore.Blockstore) *DB_Blockstore_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: _a0 +func (_m *DB) Close(_a0 context.Context) { + _m.Called(_a0) +} + +// DB_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type DB_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) Close(_a0 interface{}) *DB_Close_Call { + return &DB_Close_Call{Call: _e.mock.On("Close", _a0)} +} + +func (_c *DB_Close_Call) Run(run func(_a0 context.Context)) *DB_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_Close_Call) Return() *DB_Close_Call { + _c.Call.Return() + return _c +} + +func (_c *DB_Close_Call) RunAndReturn(run func(context.Context)) *DB_Close_Call { + _c.Call.Return(run) + return _c +} + +// DeleteReplicator provides a mock function with given fields: ctx, rep +func (_m *DB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + ret := _m.Called(ctx, rep) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { + r0 = rf(ctx, rep) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_DeleteReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicator' +type DB_DeleteReplicator_Call struct { + *mock.Call +} + +// DeleteReplicator is a helper method to define mock.On call +// - ctx context.Context +// - rep client.Replicator +func (_e *DB_Expecter) DeleteReplicator(ctx interface{}, rep interface{}) *DB_DeleteReplicator_Call { + return &DB_DeleteReplicator_Call{Call: _e.mock.On("DeleteReplicator", ctx, rep)} +} + +func (_c *DB_DeleteReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_DeleteReplicator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.Replicator)) + }) + return _c +} + +func (_c *DB_DeleteReplicator_Call) Return(_a0 error) *DB_DeleteReplicator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_DeleteReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_DeleteReplicator_Call { + _c.Call.Return(run) + return _c +} + +// Events provides a mock function with given fields: +func (_m *DB) Events() events.Events { + ret := _m.Called() + + var r0 events.Events + if rf, ok := ret.Get(0).(func() events.Events); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(events.Events) + } + + return r0 +} + +// DB_Events_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Events' +type DB_Events_Call struct { + *mock.Call +} + +// Events is a helper method to define mock.On call +func (_e *DB_Expecter) Events() *DB_Events_Call { + return &DB_Events_Call{Call: _e.mock.On("Events")} +} + +func (_c *DB_Events_Call) Run(run func()) *DB_Events_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Events_Call) Return(_a0 events.Events) *DB_Events_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Events_Call) RunAndReturn(run func() events.Events) *DB_Events_Call { + _c.Call.Return(run) + return _c +} + +// 
ExecRequest provides a mock function with given fields: _a0, _a1 +func (_m *DB) ExecRequest(_a0 context.Context, _a1 string) *client.RequestResult { + ret := _m.Called(_a0, _a1) + + var r0 *client.RequestResult + if rf, ok := ret.Get(0).(func(context.Context, string) *client.RequestResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.RequestResult) + } + } + + return r0 +} + +// DB_ExecRequest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecRequest' +type DB_ExecRequest_Call struct { + *mock.Call +} + +// ExecRequest is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) ExecRequest(_a0 interface{}, _a1 interface{}) *DB_ExecRequest_Call { + return &DB_ExecRequest_Call{Call: _e.mock.On("ExecRequest", _a0, _a1)} +} + +func (_c *DB_ExecRequest_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_ExecRequest_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_ExecRequest_Call) Return(_a0 *client.RequestResult) *DB_ExecRequest_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_ExecRequest_Call) RunAndReturn(run func(context.Context, string) *client.RequestResult) *DB_ExecRequest_Call { + _c.Call.Return(run) + return _c +} + +// GetAllCollections provides a mock function with given fields: _a0 +func (_m *DB) GetAllCollections(_a0 context.Context) ([]client.Collection, error) { + ret := _m.Called(_a0) + + var r0 []client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.Collection, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.Collection); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllCollections' +type DB_GetAllCollections_Call struct { + *mock.Call +} + +// GetAllCollections is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) GetAllCollections(_a0 interface{}) *DB_GetAllCollections_Call { + return &DB_GetAllCollections_Call{Call: _e.mock.On("GetAllCollections", _a0)} +} + +func (_c *DB_GetAllCollections_Call) Run(run func(_a0 context.Context)) *DB_GetAllCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllCollections_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetAllCollections_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllCollections_Call) RunAndReturn(run func(context.Context) ([]client.Collection, error)) *DB_GetAllCollections_Call { + _c.Call.Return(run) + return _c +} + +// GetAllIndexes provides a mock function with given fields: _a0 +func (_m *DB) GetAllIndexes(_a0 context.Context) (map[string][]client.IndexDescription, error) { + ret := _m.Called(_a0) + + var r0 map[string][]client.IndexDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (map[string][]client.IndexDescription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) map[string][]client.IndexDescription); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(map[string][]client.IndexDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllIndexes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllIndexes' +type DB_GetAllIndexes_Call struct { + *mock.Call +} + +// GetAllIndexes is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) GetAllIndexes(_a0 interface{}) *DB_GetAllIndexes_Call { + return &DB_GetAllIndexes_Call{Call: _e.mock.On("GetAllIndexes", _a0)} +} + +func (_c *DB_GetAllIndexes_Call) Run(run func(_a0 context.Context)) *DB_GetAllIndexes_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllIndexes_Call) Return(_a0 map[string][]client.IndexDescription, _a1 error) *DB_GetAllIndexes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[string][]client.IndexDescription, error)) *DB_GetAllIndexes_Call { + _c.Call.Return(run) + return _c +} + +// GetAllP2PCollections provides a mock function with given fields: ctx +func (_m *DB) GetAllP2PCollections(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllP2PCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllP2PCollections' +type DB_GetAllP2PCollections_Call struct { + *mock.Call +} + +// GetAllP2PCollections is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) GetAllP2PCollections(ctx interface{}) *DB_GetAllP2PCollections_Call { + return &DB_GetAllP2PCollections_Call{Call: _e.mock.On("GetAllP2PCollections", ctx)} +} + +func (_c *DB_GetAllP2PCollections_Call) Run(run func(ctx context.Context)) *DB_GetAllP2PCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllP2PCollections_Call) Return(_a0 []string, _a1 error) *DB_GetAllP2PCollections_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllP2PCollections_Call) RunAndReturn(run func(context.Context) ([]string, error)) *DB_GetAllP2PCollections_Call { + _c.Call.Return(run) + return _c +} + +// GetAllReplicators provides a mock function with given fields: ctx +func (_m *DB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + ret := _m.Called(ctx) + + var r0 []client.Replicator + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.Replicator, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.Replicator); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.Replicator) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllReplicators_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllReplicators' +type 
DB_GetAllReplicators_Call struct { + *mock.Call +} + +// GetAllReplicators is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) GetAllReplicators(ctx interface{}) *DB_GetAllReplicators_Call { + return &DB_GetAllReplicators_Call{Call: _e.mock.On("GetAllReplicators", ctx)} +} + +func (_c *DB_GetAllReplicators_Call) Run(run func(ctx context.Context)) *DB_GetAllReplicators_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllReplicators_Call) Return(_a0 []client.Replicator, _a1 error) *DB_GetAllReplicators_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllReplicators_Call) RunAndReturn(run func(context.Context) ([]client.Replicator, error)) *DB_GetAllReplicators_Call { + _c.Call.Return(run) + return _c +} + +// GetCollectionByName provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetCollectionByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByName' +type DB_GetCollectionByName_Call struct { + *mock.Call +} + +// GetCollectionByName is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionByName(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByName_Call { + return &DB_GetCollectionByName_Call{Call: _e.mock.On("GetCollectionByName", _a0, _a1)} +} + +func (_c *DB_GetCollectionByName_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetCollectionByName_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByName_Call { + _c.Call.Return(run) + return _c +} + +// GetCollectionBySchemaID provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionBySchemaID(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetCollectionBySchemaID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionBySchemaID' +type DB_GetCollectionBySchemaID_Call struct { + *mock.Call +} + +// 
GetCollectionBySchemaID is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionBySchemaID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionBySchemaID_Call { + return &DB_GetCollectionBySchemaID_Call{Call: _e.mock.On("GetCollectionBySchemaID", _a0, _a1)} +} + +func (_c *DB_GetCollectionBySchemaID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionBySchemaID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetCollectionBySchemaID_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionBySchemaID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetCollectionBySchemaID_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionBySchemaID_Call { + _c.Call.Return(run) + return _c +} + +// GetCollectionByVersionID provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionByVersionID(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetCollectionByVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByVersionID' +type DB_GetCollectionByVersionID_Call struct { + *mock.Call +} + +// GetCollectionByVersionID is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionByVersionID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByVersionID_Call { + return &DB_GetCollectionByVersionID_Call{Call: _e.mock.On("GetCollectionByVersionID", _a0, _a1)} +} + +func (_c *DB_GetCollectionByVersionID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByVersionID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetCollectionByVersionID_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByVersionID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetCollectionByVersionID_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByVersionID_Call { + _c.Call.Return(run) + return _c +} + +// LensRegistry provides a mock function with given fields: +func (_m *DB) LensRegistry() client.LensRegistry { + ret := _m.Called() + + var r0 client.LensRegistry + if rf, ok := ret.Get(0).(func() client.LensRegistry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.LensRegistry) + } + } + + return r0 +} + +// DB_LensRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LensRegistry' +type DB_LensRegistry_Call struct { + *mock.Call +} + +// LensRegistry is a helper method to define mock.On call +func (_e *DB_Expecter) LensRegistry() *DB_LensRegistry_Call { + return &DB_LensRegistry_Call{Call: _e.mock.On("LensRegistry")} +} + +func (_c *DB_LensRegistry_Call) Run(run func()) 
*DB_LensRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_LensRegistry_Call) Return(_a0 client.LensRegistry) *DB_LensRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_LensRegistry_Call) RunAndReturn(run func() client.LensRegistry) *DB_LensRegistry_Call { + _c.Call.Return(run) + return _c +} + +// MaxTxnRetries provides a mock function with given fields: +func (_m *DB) MaxTxnRetries() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// DB_MaxTxnRetries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MaxTxnRetries' +type DB_MaxTxnRetries_Call struct { + *mock.Call +} + +// MaxTxnRetries is a helper method to define mock.On call +func (_e *DB_Expecter) MaxTxnRetries() *DB_MaxTxnRetries_Call { + return &DB_MaxTxnRetries_Call{Call: _e.mock.On("MaxTxnRetries")} +} + +func (_c *DB_MaxTxnRetries_Call) Run(run func()) *DB_MaxTxnRetries_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_MaxTxnRetries_Call) Return(_a0 int) *DB_MaxTxnRetries_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_MaxTxnRetries_Call) RunAndReturn(run func() int) *DB_MaxTxnRetries_Call { + _c.Call.Return(run) + return _c +} + +// NewConcurrentTxn provides a mock function with given fields: _a0, _a1 +func (_m *DB) NewConcurrentTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) { + ret := _m.Called(_a0, _a1) + + var r0 datastore.Txn + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, bool) datastore.Txn); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.Txn) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_NewConcurrentTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewConcurrentTxn' +type DB_NewConcurrentTxn_Call struct { + *mock.Call +} + +// NewConcurrentTxn is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 bool +func (_e *DB_Expecter) NewConcurrentTxn(_a0 interface{}, _a1 interface{}) *DB_NewConcurrentTxn_Call { + return &DB_NewConcurrentTxn_Call{Call: _e.mock.On("NewConcurrentTxn", _a0, _a1)} +} + +func (_c *DB_NewConcurrentTxn_Call) Run(run func(_a0 context.Context, _a1 bool)) *DB_NewConcurrentTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *DB_NewConcurrentTxn_Call) Return(_a0 datastore.Txn, _a1 error) *DB_NewConcurrentTxn_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_NewConcurrentTxn_Call) RunAndReturn(run func(context.Context, bool) (datastore.Txn, error)) *DB_NewConcurrentTxn_Call { + _c.Call.Return(run) + return _c +} + +// NewTxn provides a mock function with given fields: _a0, _a1 +func (_m *DB) NewTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) { + ret := _m.Called(_a0, _a1) + + var r0 datastore.Txn + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, bool) datastore.Txn); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(datastore.Txn) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_NewTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTxn' +type DB_NewTxn_Call struct { + *mock.Call +} + +// NewTxn is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 bool +func (_e *DB_Expecter) NewTxn(_a0 interface{}, _a1 interface{}) *DB_NewTxn_Call { + return &DB_NewTxn_Call{Call: _e.mock.On("NewTxn", _a0, _a1)} +} + +func (_c *DB_NewTxn_Call) Run(run func(_a0 context.Context, _a1 bool)) *DB_NewTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *DB_NewTxn_Call) Return(_a0 datastore.Txn, _a1 error) *DB_NewTxn_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastore.Txn, error)) *DB_NewTxn_Call { + _c.Call.Return(run) + return _c +} + +// PatchSchema provides a mock function with given fields: _a0, _a1 +func (_m *DB) PatchSchema(_a0 context.Context, _a1 string) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_PatchSchema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchSchema' +type DB_PatchSchema_Call struct { + *mock.Call +} + +// PatchSchema is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}) *DB_PatchSchema_Call { + return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1)} +} + +func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_PatchSchema_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_PatchSchema_Call) Return(_a0 error) *DB_PatchSchema_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string) error) *DB_PatchSchema_Call { + _c.Call.Return(run) + return _c +} + +// PrintDump provides a mock function with given fields: ctx +func (_m *DB) PrintDump(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_PrintDump_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintDump' +type DB_PrintDump_Call struct { + *mock.Call +} + +// PrintDump is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) PrintDump(ctx interface{}) *DB_PrintDump_Call { + return &DB_PrintDump_Call{Call: _e.mock.On("PrintDump", ctx)} +} + +func (_c *DB_PrintDump_Call) Run(run func(ctx context.Context)) *DB_PrintDump_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_PrintDump_Call) Return(_a0 error) *DB_PrintDump_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_PrintDump_Call { + _c.Call.Return(run) + return _c +} + +// RemoveP2PCollection provides a mock function with given fields: ctx, collectionID +func (_m *DB) 
RemoveP2PCollection(ctx context.Context, collectionID string) error { + ret := _m.Called(ctx, collectionID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, collectionID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_RemoveP2PCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveP2PCollection' +type DB_RemoveP2PCollection_Call struct { + *mock.Call +} + +// RemoveP2PCollection is a helper method to define mock.On call +// - ctx context.Context +// - collectionID string +func (_e *DB_Expecter) RemoveP2PCollection(ctx interface{}, collectionID interface{}) *DB_RemoveP2PCollection_Call { + return &DB_RemoveP2PCollection_Call{Call: _e.mock.On("RemoveP2PCollection", ctx, collectionID)} +} + +func (_c *DB_RemoveP2PCollection_Call) Run(run func(ctx context.Context, collectionID string)) *DB_RemoveP2PCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_RemoveP2PCollection_Call) Return(_a0 error) *DB_RemoveP2PCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_RemoveP2PCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_RemoveP2PCollection_Call { + _c.Call.Return(run) + return _c +} + +// Root provides a mock function with given fields: +func (_m *DB) Root() datastore.RootStore { + ret := _m.Called() + + var r0 datastore.RootStore + if rf, ok := ret.Get(0).(func() datastore.RootStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.RootStore) + } + } + + return r0 +} + +// DB_Root_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Root' +type DB_Root_Call struct { + *mock.Call +} + +// Root is a helper method to define mock.On call +func (_e *DB_Expecter) Root() *DB_Root_Call { + return &DB_Root_Call{Call: _e.mock.On("Root")} +} + +func (_c *DB_Root_Call) Run(run func()) *DB_Root_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Root_Call) Return(_a0 datastore.RootStore) *DB_Root_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Call { + _c.Call.Return(run) + return _c +} + +// SetMigration provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.LensConfig) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_SetMigration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetMigration' +type DB_SetMigration_Call struct { + *mock.Call +} + +// SetMigration is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.LensConfig +func (_e *DB_Expecter) SetMigration(_a0 interface{}, _a1 interface{}) *DB_SetMigration_Call { + return &DB_SetMigration_Call{Call: _e.mock.On("SetMigration", _a0, _a1)} +} + +func (_c *DB_SetMigration_Call) Run(run func(_a0 context.Context, _a1 client.LensConfig)) *DB_SetMigration_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.LensConfig)) + }) + return _c +} + +func (_c *DB_SetMigration_Call) Return(_a0 error) *DB_SetMigration_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*DB_SetMigration_Call) RunAndReturn(run func(context.Context, client.LensConfig) error) *DB_SetMigration_Call { + _c.Call.Return(run) + return _c +} + +// SetReplicator provides a mock function with given fields: ctx, rep +func (_m *DB) SetReplicator(ctx context.Context, rep client.Replicator) error { + ret := _m.Called(ctx, rep) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { + r0 = rf(ctx, rep) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_SetReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetReplicator' +type DB_SetReplicator_Call struct { + *mock.Call +} + +// SetReplicator is a helper method to define mock.On call +// - ctx context.Context +// - rep client.Replicator +func (_e *DB_Expecter) SetReplicator(ctx interface{}, rep interface{}) *DB_SetReplicator_Call { + return &DB_SetReplicator_Call{Call: _e.mock.On("SetReplicator", ctx, rep)} +} + +func (_c *DB_SetReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_SetReplicator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.Replicator)) + }) + return _c +} + +func (_c *DB_SetReplicator_Call) Return(_a0 error) *DB_SetReplicator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_SetReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_SetReplicator_Call { + _c.Call.Return(run) + return _c +} + +// WithTxn provides a mock function with given fields: _a0 +func (_m *DB) WithTxn(_a0 datastore.Txn) client.Store { + ret := _m.Called(_a0) + + var r0 client.Store + if rf, ok := ret.Get(0).(func(datastore.Txn) client.Store); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Store) + } + } + + return r0 +} + +// DB_WithTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithTxn' +type DB_WithTxn_Call struct { + *mock.Call +} + +// WithTxn is a helper method to define mock.On call +// - _a0 datastore.Txn +func (_e *DB_Expecter) WithTxn(_a0 interface{}) *DB_WithTxn_Call { + return &DB_WithTxn_Call{Call: _e.mock.On("WithTxn", _a0)} +} + +func (_c *DB_WithTxn_Call) Run(run func(_a0 datastore.Txn)) *DB_WithTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(datastore.Txn)) + }) + return _c +} + +func (_c *DB_WithTxn_Call) Return(_a0 client.Store) *DB_WithTxn_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_WithTxn_Call) RunAndReturn(run func(datastore.Txn) client.Store) *DB_WithTxn_Call { + _c.Call.Return(run) + return _c +} + +// NewDB creates a new instance of DB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDB(t interface { + mock.TestingT + Cleanup(func()) +}) *DB { + mock := &DB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/client/request/consts.go b/client/request/consts.go index 04ade35bc5..7287a49ac3 100644 --- a/client/request/consts.go +++ b/client/request/consts.go @@ -15,6 +15,10 @@ const ( // https://spec.graphql.org/October2021/#sec-Type-Name-Introspection TypeNameFieldName = "__typename" + // This is appended to the related object name to give us the field name + // that corresponds to the related object's join relation id, i.e. `Author_id`. 
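+ // Such a field holds the dockey of the related document.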
+ RelatedObjectID = "_id" + Cid = "cid" Data = "data" DocKey = "dockey" diff --git a/client/request/errors.go b/client/request/errors.go new file mode 100644 index 0000000000..e3c6b143f0 --- /dev/null +++ b/client/request/errors.go @@ -0,0 +1,33 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package request + +import ( + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errSelectOfNonGroupField string = "cannot select a non-group-by field at group-level" +) + +// Errors returnable from this package. +// +// This list is incomplete and undefined errors may also be returned. +// Errors returned from this package may be tested against these errors with errors.Is. +var ( + ErrSelectOfNonGroupField = errors.New(errSelectOfNonGroupField) +) + +// NewErrSelectOfNonGroupField returns an error indicating that a non-group-by field +// was selected at group-level. +func NewErrSelectOfNonGroupField(name string) error { + return errors.New(errSelectOfNonGroupField, errors.NewKV("Field", name)) +} diff --git a/client/request/explain.go b/client/request/explain.go index ee8cb2b388..36d5df4cbe 100644 --- a/client/request/explain.go +++ b/client/request/explain.go @@ -17,4 +17,5 @@ type ExplainType string const ( SimpleExplain ExplainType = "simple" ExecuteExplain ExplainType = "execute" + DebugExplain ExplainType = "debug" ) diff --git a/client/request/select.go b/client/request/select.go index 0d09cad8dc..fb842228aa 100644 --- a/client/request/select.go +++ b/client/request/select.go @@ -12,8 +12,6 @@ package request import ( "github.com/sourcenetwork/immutable" - - "github.com/sourcenetwork/defradb/client" ) // SelectionType is the type of selection. @@ -89,14 +87,18 @@ func (s *Select) validateGroupBy() []error { } var fieldExistsInGroupBy bool + var isAliasFieldInGroupBy bool for _, groupByField := range s.GroupBy.Value().Fields { if typedChildSelection.Name == groupByField { fieldExistsInGroupBy = true break + } else if typedChildSelection.Name == groupByField+RelatedObjectID { + isAliasFieldInGroupBy = true + break } } - if !fieldExistsInGroupBy { - result = append(result, client.NewErrSelectOfNonGroupField(typedChildSelection.Name)) + if !fieldExistsInGroupBy && !isAliasFieldInGroupBy { + result = append(result, NewErrSelectOfNonGroupField(typedChildSelection.Name)) } default: // Do nothing diff --git a/config/config.go b/config/config.go index 524a9fe94f..e659dc0cbc 100644 --- a/config/config.go +++ b/config/config.go @@ -61,7 +61,6 @@ import ( badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/node" ) var log = logging.MustNewLogger("config") @@ -424,29 +423,6 @@ func (netcfg *NetConfig) RPCMaxConnectionIdleDuration() (time.Duration, error) { return d, nil } -// NodeConfig provides the Node-specific configuration, from the top-level Net config. 
-func (cfg *Config) NodeConfig() node.NodeOpt { - return func(opt *node.Options) error { - var err error - err = node.ListenP2PAddrStrings(cfg.Net.P2PAddress)(opt) - if err != nil { - return err - } - err = node.ListenTCPAddrString(cfg.Net.TCPAddress)(opt) - if err != nil { - return err - } - opt.EnableRelay = cfg.Net.RelayEnabled - opt.EnablePubSub = cfg.Net.PubSubEnabled - opt.DataPath = cfg.Datastore.Badger.Path - opt.ConnManager, err = node.NewConnManager(100, 400, time.Second*20) - if err != nil { - return err - } - return nil - } -} - // LogConfig configures output and logger. type LoggingConfig struct { Level string diff --git a/config/config_test.go b/config/config_test.go index 2ed3a3dec3..b7ff295efa 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -17,10 +17,7 @@ import ( "testing" "time" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/node" ) var envVarsDifferent = map[string]string{ @@ -224,47 +221,6 @@ func TestInvalidEnvVars(t *testing.T) { assert.ErrorIs(t, err, ErrLoadingConfig) } -func TestNodeConfig(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" - cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169" - cfg.Net.RPCTimeout = "100s" - cfg.Net.RPCMaxConnectionIdle = "111s" - cfg.Net.RelayEnabled = true - cfg.Net.PubSubEnabled = true - cfg.Datastore.Badger.Path = "/tmp/defra_cli/badger" - - err := cfg.validate() - assert.NoError(t, err) - - nodeConfig := cfg.NodeConfig() - options, errOptionsMerge := node.NewMergedOptions(nodeConfig) - - // confirming it provides the same config as a manually constructed node.Options - p2pAddr, errP2P := ma.NewMultiaddr(cfg.Net.P2PAddress) - tcpAddr, errTCP := ma.NewMultiaddr(cfg.Net.TCPAddress) - connManager, errConnManager := node.NewConnManager(100, 400, time.Second*20) - expectedOptions := node.Options{ - ListenAddrs: []ma.Multiaddr{p2pAddr}, - TCPAddr: tcpAddr, - DataPath: "/tmp/defra_cli/badger", - EnablePubSub: true, - EnableRelay: true, - ConnManager: connManager, - } - assert.NoError(t, errOptionsMerge) - assert.NoError(t, errP2P) - assert.NoError(t, errTCP) - assert.NoError(t, errConnManager) - for k, v := range options.ListenAddrs { - assert.Equal(t, expectedOptions.ListenAddrs[k], v) - } - assert.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String()) - assert.Equal(t, expectedOptions.DataPath, options.DataPath) - assert.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) - assert.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) -} - func TestCreateAndLoadCustomConfig(t *testing.T) { testdir := t.TempDir() diff --git a/connor/connor.go b/connor/connor.go index 9f56041c6c..4b174bc45c 100644 --- a/connor/connor.go +++ b/connor/connor.go @@ -40,6 +40,8 @@ func matchWith(op string, conditions, data any) (bool, error) { return like(conditions, data) case "_nlike": return nlike(conditions, data) + case "_not": + return not(conditions, data) default: return false, NewErrUnknownOperator(op) } diff --git a/connor/not.go b/connor/not.go new file mode 100644 index 0000000000..96fcd87ff8 --- /dev/null +++ b/connor/not.go @@ -0,0 +1,11 @@ +package connor + +// not is an operator which performs object equality test +// and returns the inverse of the result. 
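+// +// For example, not("Source", "Source") yields (false, nil), while +// not("Source", "SourceHub") yields (true, nil).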
+func not(condition, data any) (bool, error) { + m, err := eq(condition, data) + if err != nil { + return false, err + } + return !m, nil +} diff --git a/connor/not_test.go b/connor/not_test.go new file mode 100644 index 0000000000..1a1dd785dd --- /dev/null +++ b/connor/not_test.go @@ -0,0 +1,50 @@ +package connor + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNot_WithNotAndNotNot_NoError(t *testing.T) { + const testString = "Source is the glue of web3" + + // not equal + result, err := not(testString, testString) + require.NoError(t, err) + require.False(t, result) + + // not not equal + result, err = not("Source is the glue", testString) + require.NoError(t, err) + require.True(t, result) +} + +func TestNot_WithEmptyCondition_ReturnError(t *testing.T) { + const testString = "Source is the glue of web3" + + _, err := not(map[FilterKey]any{&operator{"_some"}: "test"}, testString) + require.ErrorIs(t, err, ErrUnknownOperator) +} + +type operator struct { + // The filter operation string that this `operator` represents. + // + // E.g. "_eq", or "_and". + Operation string +} + +func (k *operator) GetProp(data any) any { + return data +} + +func (k *operator) GetOperatorOrDefault(defaultOp string) string { + return k.Operation +} + +func (k *operator) Equal(other FilterKey) bool { + if otherKey, isOk := other.(*operator); isOk && *k == *otherKey { + return true + } + return false +} diff --git a/core/cid.go b/core/cid/cid.go similarity index 85% rename from core/cid.go rename to core/cid/cid.go index d9c8bbeea2..14367f4ae9 100644 --- a/core/cid.go +++ b/core/cid/cid.go @@ -8,22 +8,24 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package core +package cid import ( "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" ) -// NewSHA256CidV1 returns a new CIDv1 with the SHA256 multihash. -func NewSHA256CidV1(data []byte) (cid.Cid, error) { - pref := cid.Prefix{ +func NewDefaultSHA256PrefixV1() cid.Prefix { + return cid.Prefix{ Version: 1, Codec: cid.Raw, MhType: mh.SHA2_256, MhLength: -1, // default length } +} +// NewSHA256CidV1 returns a new CIDv1 with the SHA256 multihash. +func NewSHA256CidV1(data []byte) (cid.Cid, error) { // And then feed it some data - return pref.Sum(data) + return NewDefaultSHA256PrefixV1().Sum(data) } diff --git a/core/crdt/composite.go b/core/crdt/composite.go index e7ce96e867..ab6cbe95f5 100644 --- a/core/crdt/composite.go +++ b/core/crdt/composite.go @@ -154,13 +154,27 @@ func (c CompositeDAG) Merge(ctx context.Context, delta core.Delta, id string) er return c.deleteWithPrefix(ctx, c.key.WithValueFlag().WithFieldId("")) } - // ensure object marker exists - exists, err := c.store.Has(ctx, c.key.ToPrimaryDataStoreKey().ToDS()) + // We cannot rely on the dagDelta.Status here as it may have been deleted locally, this is not + // reflected in `dagDelta.Status` if sourced via P2P. Updates synced via P2P should not undelete + // the local representation of the document.
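+ // If the local object marker equals base.DeletedObjectMarker, the incoming update is + // written under the deleted flag below, preserving the local delete.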
+ versionKey := c.key.WithValueFlag().WithFieldId(core.DATASTORE_DOC_VERSION_FIELD_ID) + objectMarker, err := c.store.Get(ctx, c.key.ToPrimaryDataStoreKey().ToDS()) + hasObjectMarker := !errors.Is(err, ds.ErrNotFound) + if err != nil && hasObjectMarker { + return err + } + + if bytes.Equal(objectMarker, []byte{base.DeletedObjectMarker}) { + versionKey = versionKey.WithDeletedFlag() + } + + err = c.store.Put(ctx, versionKey.ToDS(), []byte(c.schemaVersionKey.SchemaVersionId)) if err != nil { return err } - if !exists { - // write object marker + + if !hasObjectMarker { + // ensure object marker exists return c.store.Put(ctx, c.key.ToPrimaryDataStoreKey().ToDS(), []byte{base.ObjectMarker}) } diff --git a/core/crdt/lwwreg.go b/core/crdt/lwwreg.go index bc6806e857..9ff5ec266c 100644 --- a/core/crdt/lwwreg.go +++ b/core/crdt/lwwreg.go @@ -114,8 +114,6 @@ func (reg LWWRegister) Value(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } - // ignore the first byte (CRDT Type marker) from the returned value - buf = buf[1:] return buf, nil } @@ -187,9 +185,7 @@ func (reg LWWRegister) setValue(ctx context.Context, val []byte, priority uint64 } } - // prepend the value byte array with a single byte indicator for the CRDT Type. - buf := append([]byte{byte(client.LWW_REGISTER)}, val...) - err = reg.store.Put(ctx, key.ToDS(), buf) + err = reg.store.Put(ctx, key.ToDS(), val) if err != nil { return NewErrFailedToStoreValue(err) } diff --git a/core/data.go b/core/data.go index aee4cf64ed..a756d41f91 100644 --- a/core/data.go +++ b/core/data.go @@ -156,12 +156,6 @@ func NewSpans(spans ...Span) Spans { } } -// KeyValue is a KV store response containing the resulting core.Key and byte array value. -type KeyValue struct { - Key DataStoreKey - Value []byte -} - // HeadKeyValue is a KV store response containing the resulting core.HeadStoreKey // and byte array value. type HeadKeyValue struct { diff --git a/core/doc.go b/core/doc.go index 6966a8db4d..8f6700f50c 100644 --- a/core/doc.go +++ b/core/doc.go @@ -34,6 +34,9 @@ type Doc struct { Fields DocFields Status client.DocumentStatus + // The id of the schema version that this document is currently at. This includes + // any migrations that may have been run. + SchemaVersionID string } // GetKey returns the DocKey for this document. @@ -278,17 +281,5 @@ func (mapping *DocumentMapping) TryToFindNameFromIndex(targetIndex int) (string, } } - // Try to find the name of this index in the ChildMappings. - for _, childMapping := range mapping.ChildMappings { - if childMapping == nil { - continue - } - - name, found := childMapping.TryToFindNameFromIndex(targetIndex) - if found { - return name, true - } - } - return "", false } diff --git a/core/encoding.go b/core/encoding.go new file mode 100644 index 0000000000..9482acefbf --- /dev/null +++ b/core/encoding.go @@ -0,0 +1,170 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package core + +import ( + "fmt" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" +) + +// DecodeFieldValue takes a field value and description and converts it to the +// standardized Defra Go type. 
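+// +// For example, an int64 value for a field of kind client.FieldKind_FLOAT is converted +// to float64, and a []any value for a field of kind client.FieldKind_INT_ARRAY is +// converted to []int64.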
+func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) { + if val == nil { + return nil, nil + } + + var err error + if array, isArray := val.([]any); isArray { + var ok bool + switch fieldDesc.Kind { + case client.FieldKind_BOOL_ARRAY: + boolArray := make([]bool, len(array)) + for i, untypedValue := range array { + boolArray[i], ok = untypedValue.(bool) + if !ok { + return nil, client.NewErrUnexpectedType[bool](fieldDesc.Name, untypedValue) + } + } + val = boolArray + + case client.FieldKind_NILLABLE_BOOL_ARRAY: + val, err = convertNillableArray[bool](fieldDesc.Name, array) + if err != nil { + return nil, err + } + + case client.FieldKind_INT_ARRAY: + intArray := make([]int64, len(array)) + for i, untypedValue := range array { + intArray[i], err = convertToInt(fmt.Sprintf("%s[%v]", fieldDesc.Name, i), untypedValue) + if err != nil { + return nil, err + } + } + val = intArray + + case client.FieldKind_NILLABLE_INT_ARRAY: + val, err = convertNillableArrayWithConverter(fieldDesc.Name, array, convertToInt) + if err != nil { + return nil, err + } + + case client.FieldKind_FLOAT_ARRAY: + floatArray := make([]float64, len(array)) + for i, untypedValue := range array { + floatArray[i], ok = untypedValue.(float64) + if !ok { + return nil, client.NewErrUnexpectedType[float64](fieldDesc.Name, untypedValue) + } + } + val = floatArray + + case client.FieldKind_NILLABLE_FLOAT_ARRAY: + val, err = convertNillableArray[float64](fieldDesc.Name, array) + if err != nil { + return nil, err + } + + case client.FieldKind_STRING_ARRAY: + stringArray := make([]string, len(array)) + for i, untypedValue := range array { + stringArray[i], ok = untypedValue.(string) + if !ok { + return nil, client.NewErrUnexpectedType[string](fieldDesc.Name, untypedValue) + } + } + val = stringArray + + case client.FieldKind_NILLABLE_STRING_ARRAY: + val, err = convertNillableArray[string](fieldDesc.Name, array) + if err != nil { + return nil, err + } + } + } else { // CBOR often encodes values typed as floats as ints + switch fieldDesc.Kind { + case client.FieldKind_FLOAT: + switch v := val.(type) { + case int64: + return float64(v), nil + case int: + return float64(v), nil + case uint64: + return float64(v), nil + case uint: + return float64(v), nil + } + case client.FieldKind_INT: + switch v := val.(type) { + case float64: + if v >= 0 { + return uint64(v), nil + } + return int64(v), nil + } + } + } + + return val, nil +} + +func convertNillableArray[T any](propertyName string, items []any) ([]immutable.Option[T], error) { + resultArray := make([]immutable.Option[T], len(items)) + for i, untypedValue := range items { + if untypedValue == nil { + resultArray[i] = immutable.None[T]() + continue + } + value, ok := untypedValue.(T) + if !ok { + return nil, client.NewErrUnexpectedType[T](fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) + } + resultArray[i] = immutable.Some(value) + } + return resultArray, nil +} + +func convertNillableArrayWithConverter[TOut any]( + propertyName string, + items []any, + converter func(propertyName string, in any) (TOut, error), +) ([]immutable.Option[TOut], error) { + resultArray := make([]immutable.Option[TOut], len(items)) + for i, untypedValue := range items { + if untypedValue == nil { + resultArray[i] = immutable.None[TOut]() + continue + } + value, err := converter(fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) + if err != nil { + return nil, err + } + resultArray[i] = immutable.Some(value) + } + return resultArray, nil +} + +func convertToInt(propertyName 
string, untypedValue any) (int64, error) { + switch value := untypedValue.(type) { + case uint64: + return int64(value), nil + case int64: + return value, nil + case float64: + return int64(value), nil + default: + return 0, client.NewErrUnexpectedType[string](propertyName, untypedValue) + } +} diff --git a/core/key.go b/core/key.go index 756290a607..a8ec5ece2b 100644 --- a/core/key.go +++ b/core/key.go @@ -41,13 +41,17 @@ const ( ) const ( - COLLECTION = "/collection/names" - COLLECTION_SCHEMA = "/collection/schema" - COLLECTION_SCHEMA_VERSION = "/collection/version" - SEQ = "/seq" - PRIMARY_KEY = "/pk" - REPLICATOR = "/replicator/id" - P2P_COLLECTION = "/p2p/collection" + COLLECTION = "/collection/names" + COLLECTION_SCHEMA = "/collection/schema" + COLLECTION_SCHEMA_VERSION = "/collection/version/v" + COLLECTION_SCHEMA_VERSION_HISTORY = "/collection/version/h" + COLLECTION_INDEX = "/collection/index" + SCHEMA_MIGRATION = "/schema/migration" + SEQ = "/seq" + PRIMARY_KEY = "/pk" + DATASTORE_DOC_VERSION_FIELD_ID = "v" + REPLICATOR = "/replicator/id" + P2P_COLLECTION = "/p2p/collection" ) // Key is an interface that represents a key in the database. @@ -67,6 +71,18 @@ type DataStoreKey struct { var _ Key = (*DataStoreKey)(nil) +// IndexDataStoreKey is the key of an indexed document in the database. +type IndexDataStoreKey struct { + // CollectionID is the id of the collection + CollectionID uint32 + // IndexID is the id of the index + IndexID uint32 + // FieldValues are the values of the fields in the index + FieldValues [][]byte +} + +var _ Key = (*IndexDataStoreKey)(nil) + type PrimaryDataStoreKey struct { CollectionId string DocKey string @@ -106,6 +122,37 @@ type CollectionSchemaVersionKey struct { var _ Key = (*CollectionSchemaVersionKey)(nil) +// CollectionIndexKey points to a stored description of an index +type CollectionIndexKey struct { + // CollectionName is the name of the collection that the index is on + CollectionName string + // IndexName is the name of the index + IndexName string +} + +var _ Key = (*CollectionIndexKey)(nil) + +// SchemaHistoryKey holds the pathway through the schema version history for +// any given schema. +// +// The key points to the schema version id of the next version of the schema. +// If a SchemaHistoryKey does not exist for a given SchemaVersionID, that +// SchemaVersionID is the latest version. +type SchemaHistoryKey struct { + SchemaID string + PreviousSchemaVersionID string +} + +var _ Key = (*SchemaHistoryKey)(nil) + +// SchemaVersionMigrationKey points to the jsonified configuration of a lens migration +// for the given source schema version id. +type SchemaVersionMigrationKey struct { + SourceSchemaVersionID string +} + +var _ Key = (*SchemaVersionMigrationKey)(nil) + type P2PCollectionKey struct { CollectionID string } @@ -210,6 +257,80 @@ func NewCollectionSchemaVersionKey(schemaVersionId string) CollectionSchemaVersi return CollectionSchemaVersionKey{SchemaVersionId: schemaVersionId} } +// NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name. +func NewCollectionIndexKey(colID, indexName string) CollectionIndexKey { + return CollectionIndexKey{CollectionName: colID, IndexName: indexName} +} + +// NewCollectionIndexKeyFromString creates a new CollectionIndexKey from a string. +// It expects the input string to be in the following format: +// +// /collection/index/[CollectionName]/[IndexName] +// +// Where [IndexName] might be omitted. Anything else will return an error.
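+// +// For example, "/collection/index/col/idx" yields +// CollectionIndexKey{CollectionName: "col", IndexName: "idx"}.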
+func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { + keyArr := strings.Split(key, "/") + if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != "collection" || keyArr[2] != "index" { + return CollectionIndexKey{}, ErrInvalidKey + } + result := CollectionIndexKey{CollectionName: keyArr[3]} + if len(keyArr) == 5 { + result.IndexName = keyArr[4] + } + return result, nil +} + +// ToString returns the string representation of the key +// It is in the following format: +// /collection/index/[CollectionName]/[IndexName] +// if [CollectionName] is empty, the rest is ignored +func (k CollectionIndexKey) ToString() string { + result := COLLECTION_INDEX + + if k.CollectionName != "" { + result = result + "/" + k.CollectionName + if k.IndexName != "" { + result = result + "/" + k.IndexName + } + } + + return result +} + +// Bytes returns the byte representation of the key +func (k CollectionIndexKey) Bytes() []byte { + return []byte(k.ToString()) +} + +// ToDS returns the datastore key +func (k CollectionIndexKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func NewSchemaHistoryKey(schemaId string, previousSchemaVersionID string) SchemaHistoryKey { + return SchemaHistoryKey{ + SchemaID: schemaId, + PreviousSchemaVersionID: previousSchemaVersionID, + } +} + +func NewSchemaVersionMigrationKey(schemaVersionID string) SchemaVersionMigrationKey { + return SchemaVersionMigrationKey{SourceSchemaVersionID: schemaVersionID} +} + +func NewSchemaHistoryKeyFromString(keyString string) (SchemaHistoryKey, error) { + keyString = strings.TrimPrefix(keyString, COLLECTION_SCHEMA_VERSION_HISTORY+"/") + elements := strings.Split(keyString, "/") + if len(elements) != 2 { + return SchemaHistoryKey{}, ErrInvalidKey + } + + return SchemaHistoryKey{ + SchemaID: elements[0], + PreviousSchemaVersionID: elements[1], + }, nil +} + func NewSequenceKey(name string) SequenceKey { return SequenceKey{SequenceName: name} } @@ -318,6 +439,109 @@ func (k DataStoreKey) ToPrimaryDataStoreKey() PrimaryDataStoreKey { } } +// NewIndexDataStoreKey creates a new IndexDataStoreKey from a string. +// It expects the input string is in the following format: +// +// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) 
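+// (e.g. "/1/2/3/4" for collection 1, index 2, and field values "3" and "4")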
+// +// Where [CollectionID] and [IndexID] are integers +func NewIndexDataStoreKey(key string) (IndexDataStoreKey, error) { + if key == "" { + return IndexDataStoreKey{}, ErrEmptyKey + } + + if !strings.HasPrefix(key, "/") { + return IndexDataStoreKey{}, ErrInvalidKey + } + + elements := strings.Split(key[1:], "/") + + // With less than 3 elements, we know it's an invalid key + if len(elements) < 3 { + return IndexDataStoreKey{}, ErrInvalidKey + } + + colID, err := strconv.Atoi(elements[0]) + if err != nil { + return IndexDataStoreKey{}, ErrInvalidKey + } + + indexKey := IndexDataStoreKey{CollectionID: uint32(colID)} + + indID, err := strconv.Atoi(elements[1]) + if err != nil { + return IndexDataStoreKey{}, ErrInvalidKey + } + indexKey.IndexID = uint32(indID) + + // first 2 elements are the collection and index IDs, the rest are field values + for i := 2; i < len(elements); i++ { + indexKey.FieldValues = append(indexKey.FieldValues, []byte(elements[i])) + } + + return indexKey, nil +} + +// Bytes returns the byte representation of the key +func (k *IndexDataStoreKey) Bytes() []byte { + return []byte(k.ToString()) +} + +// ToDS returns the datastore key +func (k *IndexDataStoreKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +// ToString returns the string representation of the key +// It is in the following format: +// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) +// If while composing the string from left to right, a component +// is empty, the string is returned up to that point +func (k *IndexDataStoreKey) ToString() string { + sb := strings.Builder{} + + if k.CollectionID == 0 { + return "" + } + sb.WriteByte('/') + sb.WriteString(strconv.Itoa(int(k.CollectionID))) + + if k.IndexID == 0 { + return sb.String() + } + sb.WriteByte('/') + sb.WriteString(strconv.Itoa(int(k.IndexID))) + + for _, v := range k.FieldValues { + if len(v) == 0 { + break + } + sb.WriteByte('/') + sb.WriteString(string(v)) + } + + return sb.String() +} + +// Equal returns true if the two keys are equal +func (k IndexDataStoreKey) Equal(other IndexDataStoreKey) bool { + if k.CollectionID != other.CollectionID { + return false + } + if k.IndexID != other.IndexID { + return false + } + if len(k.FieldValues) != len(other.FieldValues) { + return false + } + for i := range k.FieldValues { + if string(k.FieldValues[i]) != string(other.FieldValues[i]) { + return false + } + } + return true +} + func (k PrimaryDataStoreKey) ToDataStoreKey() DataStoreKey { return DataStoreKey{ CollectionID: k.CollectionId, @@ -401,6 +625,46 @@ func (k CollectionSchemaVersionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } +func (k SchemaHistoryKey) ToString() string { + result := COLLECTION_SCHEMA_VERSION_HISTORY + + if k.SchemaID != "" { + result = result + "/" + k.SchemaID + } + + if k.PreviousSchemaVersionID != "" { + result = result + "/" + k.PreviousSchemaVersionID + } + + return result +} + +func (k SchemaHistoryKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaHistoryKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func (k SchemaVersionMigrationKey) ToString() string { + result := SCHEMA_MIGRATION + + if k.SourceSchemaVersionID != "" { + result = result + "/" + k.SourceSchemaVersionID + } + + return result +} + +func (k SchemaVersionMigrationKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaVersionMigrationKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + func (k SequenceKey) ToString() string { result := SEQ diff --git a/core/key_test.go 
b/core/key_test.go index 865ece9c26..d22498bd8c 100644 --- a/core/key_test.go +++ b/core/key_test.go @@ -13,6 +13,7 @@ package core import ( "testing" + ds "github.com/ipfs/go-datastore" "github.com/stretchr/testify/assert" ) @@ -107,3 +108,316 @@ func TestNewDataStoreKey_GivenAStringWithExtraSuffix(t *testing.T) { assert.ErrorIs(t, ErrInvalidKey, err) } + +func TestNewIndexKey_IfEmptyParam_ReturnPrefix(t *testing.T) { + key := NewCollectionIndexKey("", "") + assert.Equal(t, "/collection/index", key.ToString()) +} + +func TestNewIndexKey_IfParamsAreGiven_ReturnFullKey(t *testing.T) { + key := NewCollectionIndexKey("col", "idx") + assert.Equal(t, "/collection/index/col/idx", key.ToString()) +} + +func TestNewIndexKey_InNoCollectionName_ReturnJustPrefix(t *testing.T) { + key := NewCollectionIndexKey("", "idx") + assert.Equal(t, "/collection/index", key.ToString()) +} + +func TestNewIndexKey_InNoIndexName_ReturnWithoutIndexName(t *testing.T) { + key := NewCollectionIndexKey("col", "") + assert.Equal(t, "/collection/index/col", key.ToString()) +} + +func TestNewIndexKeyFromString_IfInvalidString_ReturnError(t *testing.T) { + for _, key := range []string{ + "", + "/collection", + "/collection/index", + "/collection/index/col/idx/extra", + "/wrong/index/col/idx", + "/collection/wrong/col/idx", + } { + _, err := NewCollectionIndexKeyFromString(key) + assert.ErrorIs(t, err, ErrInvalidKey) + } +} + +func TestNewIndexKeyFromString_IfOnlyCollectionName_ReturnKey(t *testing.T) { + key, err := NewCollectionIndexKeyFromString("/collection/index/col") + assert.NoError(t, err) + assert.Equal(t, key.CollectionName, "col") + assert.Equal(t, key.IndexName, "") +} + +func TestNewIndexKeyFromString_IfFullKeyString_ReturnKey(t *testing.T) { + key, err := NewCollectionIndexKeyFromString("/collection/index/col/idx") + assert.NoError(t, err) + assert.Equal(t, key.CollectionName, "col") + assert.Equal(t, key.IndexName, "idx") +} + +func toFieldValues(values ...string) [][]byte { + var result [][]byte = make([][]byte, 0, len(values)) + for _, value := range values { + result = append(result, []byte(value)) + } + return result +} + +func TestIndexDatastoreKey_ToString(t *testing.T) { + cases := []struct { + Key IndexDataStoreKey + Expected string + }{ + { + Key: IndexDataStoreKey{}, + Expected: "", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + }, + Expected: "/1", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + }, + Expected: "/1/2", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3"), + }, + Expected: "/1/2/3", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + Expected: "/1/2/3/4", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + FieldValues: toFieldValues("3"), + }, + Expected: "/1", + }, + { + Key: IndexDataStoreKey{ + IndexID: 2, + FieldValues: toFieldValues("3"), + }, + Expected: "", + }, + { + Key: IndexDataStoreKey{ + FieldValues: toFieldValues("3"), + }, + Expected: "", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("", ""), + }, + Expected: "/1/2", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("", "3"), + }, + Expected: "/1/2", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "", "4"), + }, + Expected: "/1/2/3", + }, + } + for i, c := range cases { + assert.Equal(t, c.Key.ToString(), c.Expected, "case %d", 
i) + } +} + +func TestIndexDatastoreKey_Bytes(t *testing.T) { + key := IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + } + assert.Equal(t, key.Bytes(), []byte("/1/2/3/4")) +} + +func TestIndexDatastoreKey_ToDS(t *testing.T) { + key := IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + } + assert.Equal(t, key.ToDS(), ds.NewKey("/1/2/3/4")) +} + +func TestIndexDatastoreKey_EqualTrue(t *testing.T) { + cases := [][]IndexDataStoreKey{ + { + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + FieldValues: toFieldValues("3", "4"), + }, + { + CollectionID: 1, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + }, + { + CollectionID: 1, + }, + }, + } + + for i, c := range cases { + assert.True(t, c[0].Equal(c[1]), "case %d", i) + } +} + +func TestCollectionIndexKey_Bytes(t *testing.T) { + key := CollectionIndexKey{ + CollectionName: "col", + IndexName: "idx", + } + assert.Equal(t, []byte(COLLECTION_INDEX+"/col/idx"), key.Bytes()) +} + +func TestIndexDatastoreKey_EqualFalse(t *testing.T) { + cases := [][]IndexDataStoreKey{ + { + { + CollectionID: 1, + }, + { + CollectionID: 2, + }, + }, + { + { + CollectionID: 1, + IndexID: 2, + }, + { + CollectionID: 1, + IndexID: 3, + }, + }, + { + { + CollectionID: 1, + }, + { + IndexID: 1, + }, + }, + { + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("4", "3"), + }, + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3"), + }, + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + FieldValues: toFieldValues("3", "", "4"), + }, + { + CollectionID: 1, + FieldValues: toFieldValues("3", "4"), + }, + }, + } + + for i, c := range cases { + assert.False(t, c[0].Equal(c[1]), "case %d", i) + } +} + +func TestNewIndexDataStoreKey_ValidKey(t *testing.T) { + str, err := NewIndexDataStoreKey("/1/2/3") + assert.NoError(t, err) + assert.Equal(t, str, IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3"), + }) + + str, err = NewIndexDataStoreKey("/1/2/3/4") + assert.NoError(t, err) + assert.Equal(t, str, IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }) +} + +func TestNewIndexDataStoreKey_InvalidKey(t *testing.T) { + keys := []string{ + "", + "/", + "/1", + "/1/2", + " /1/2/3", + "1/2/3", + "/a/2/3", + "/1/b/3", + } + for i, key := range keys { + _, err := NewIndexDataStoreKey(key) + assert.Error(t, err, "case %d: %s", i, key) + } +} diff --git a/core/net/protocol.go b/core/net/protocol.go index c405f95f25..82024bde81 100644 --- a/core/net/protocol.go +++ b/core/net/protocol.go @@ -15,7 +15,7 @@ import ( ma "github.com/multiformats/go-multiaddr" ) -// DefraDB's p2p protocol information (https://docs.libp2p.io/concepts/protocols/). +// DefraDB's P2P protocol information (https://docs.libp2p.io/concepts/protocols/). const ( // Name is the protocol slug, the codename representing it. 
diff --git a/datastore/blockstore_test.go b/datastore/blockstore_test.go index b3861cb7dc..81e086c99f 100644 --- a/datastore/blockstore_test.go +++ b/datastore/blockstore_test.go @@ -17,9 +17,10 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" + ccid "github.com/sourcenetwork/defradb/core/cid" + "github.com/sourcenetwork/defradb/datastore/memory" ) @@ -28,20 +29,6 @@ var ( data2 = []byte("SourceHub") ) -// Adding this here to avoid circular dependency datastore->core->datastore. -// The culprit is `core.Parser`. -func newSHA256CidV1(data []byte) (cid.Cid, error) { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - - // And then feed it some data - return pref.Sum(data) -} - func TestBStoreGet(t *testing.T) { ctx := context.Background() rootstore := memory.NewDatastore(ctx) @@ -51,7 +38,7 @@ func TestBStoreGet(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -73,7 +60,7 @@ func TestBStoreGetWithUndefinedCID(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -93,7 +80,7 @@ func TestBStoreGetWithStoreClosed(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -118,7 +105,7 @@ func TestBStoreGetWithReHash(t *testing.T) { bs.HashOnRead(true) - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -140,12 +127,12 @@ func TestPutMany(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) - cID2, err := newSHA256CidV1(data2) + cID2, err := ccid.NewSHA256CidV1(data2) require.NoError(t, err) b2, err := blocks.NewBlockWithCid(data2, cID2) require.NoError(t, err) @@ -163,7 +150,7 @@ func TestPutManyWithExists(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -171,7 +158,7 @@ func TestPutManyWithExists(t *testing.T) { err = bs.Put(ctx, b) require.NoError(t, err) - cID2, err := newSHA256CidV1(data2) + cID2, err := ccid.NewSHA256CidV1(data2) require.NoError(t, err) b2, err := blocks.NewBlockWithCid(data2, cID2) require.NoError(t, err) @@ -189,12 +176,12 @@ func TestPutManyWithStoreClosed(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) - cID2, err := newSHA256CidV1(data2) + cID2, err := ccid.NewSHA256CidV1(data2) require.NoError(t, err) b2, err := blocks.NewBlockWithCid(data2, cID2) require.NoError(t, err) diff --git a/datastore/mocks/DAGStore.go b/datastore/mocks/DAGStore.go new file mode 100644 index 0000000000..1ca7d96d7b --- /dev/null +++ b/datastore/mocks/DAGStore.go @@ -0,0 +1,416 @@ +// Code generated 
by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// DAGStore is an autogenerated mock type for the DAGStore type +type DAGStore struct { + mock.Mock +} + +type DAGStore_Expecter struct { + mock *mock.Mock +} + +func (_m *DAGStore) EXPECT() *DAGStore_Expecter { + return &DAGStore_Expecter{mock: &_m.Mock} +} + +// AllKeysChan provides a mock function with given fields: ctx +func (_m *DAGStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ret := _m.Called(ctx) + + var r0 <-chan cid.Cid + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan cid.Cid, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan cid.Cid); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan cid.Cid) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_AllKeysChan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllKeysChan' +type DAGStore_AllKeysChan_Call struct { + *mock.Call +} + +// AllKeysChan is a helper method to define mock.On call +// - ctx context.Context +func (_e *DAGStore_Expecter) AllKeysChan(ctx interface{}) *DAGStore_AllKeysChan_Call { + return &DAGStore_AllKeysChan_Call{Call: _e.mock.On("AllKeysChan", ctx)} +} + +func (_c *DAGStore_AllKeysChan_Call) Run(run func(ctx context.Context)) *DAGStore_AllKeysChan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DAGStore_AllKeysChan_Call) Return(_a0 <-chan cid.Cid, _a1 error) *DAGStore_AllKeysChan_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_AllKeysChan_Call) RunAndReturn(run func(context.Context) (<-chan cid.Cid, error)) *DAGStore_AllKeysChan_Call { + _c.Call.Return(run) + return _c +} + +// DeleteBlock provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) DeleteBlock(_a0 context.Context, _a1 cid.Cid) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DAGStore_DeleteBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteBlock' +type DAGStore_DeleteBlock_Call struct { + *mock.Call +} + +// DeleteBlock is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) DeleteBlock(_a0 interface{}, _a1 interface{}) *DAGStore_DeleteBlock_Call { + return &DAGStore_DeleteBlock_Call{Call: _e.mock.On("DeleteBlock", _a0, _a1)} +} + +func (_c *DAGStore_DeleteBlock_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_DeleteBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_DeleteBlock_Call) Return(_a0 error) *DAGStore_DeleteBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_DeleteBlock_Call) RunAndReturn(run func(context.Context, cid.Cid) error) *DAGStore_DeleteBlock_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) Get(_a0 context.Context, _a1 cid.Cid) (blocks.Block, error) { + ret := _m.Called(_a0, _a1) + + var r0 
blocks.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blocks.Block, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) blocks.Block); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blocks.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type DAGStore_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) Get(_a0 interface{}, _a1 interface{}) *DAGStore_Get_Call { + return &DAGStore_Get_Call{Call: _e.mock.On("Get", _a0, _a1)} +} + +func (_c *DAGStore_Get_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_Get_Call) Return(_a0 blocks.Block, _a1 error) *DAGStore_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_Get_Call) RunAndReturn(run func(context.Context, cid.Cid) (blocks.Block, error)) *DAGStore_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetSize provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) GetSize(_a0 context.Context, _a1 cid.Cid) (int, error) { + ret := _m.Called(_a0, _a1) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (int, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) int); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_GetSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSize' +type DAGStore_GetSize_Call struct { + *mock.Call +} + +// GetSize is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) GetSize(_a0 interface{}, _a1 interface{}) *DAGStore_GetSize_Call { + return &DAGStore_GetSize_Call{Call: _e.mock.On("GetSize", _a0, _a1)} +} + +func (_c *DAGStore_GetSize_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_GetSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_GetSize_Call) Return(_a0 int, _a1 error) *DAGStore_GetSize_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_GetSize_Call) RunAndReturn(run func(context.Context, cid.Cid) (int, error)) *DAGStore_GetSize_Call { + _c.Call.Return(run) + return _c +} + +// Has provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) Has(_a0 context.Context, _a1 cid.Cid) (bool, error) { + ret := _m.Called(_a0, _a1) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (bool, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_Has_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'Has' +type DAGStore_Has_Call struct { + *mock.Call +} + +// Has is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) Has(_a0 interface{}, _a1 interface{}) *DAGStore_Has_Call { + return &DAGStore_Has_Call{Call: _e.mock.On("Has", _a0, _a1)} +} + +func (_c *DAGStore_Has_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_Has_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_Has_Call) Return(_a0 bool, _a1 error) *DAGStore_Has_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_Has_Call) RunAndReturn(run func(context.Context, cid.Cid) (bool, error)) *DAGStore_Has_Call { + _c.Call.Return(run) + return _c +} + +// HashOnRead provides a mock function with given fields: enabled +func (_m *DAGStore) HashOnRead(enabled bool) { + _m.Called(enabled) +} + +// DAGStore_HashOnRead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HashOnRead' +type DAGStore_HashOnRead_Call struct { + *mock.Call +} + +// HashOnRead is a helper method to define mock.On call +// - enabled bool +func (_e *DAGStore_Expecter) HashOnRead(enabled interface{}) *DAGStore_HashOnRead_Call { + return &DAGStore_HashOnRead_Call{Call: _e.mock.On("HashOnRead", enabled)} +} + +func (_c *DAGStore_HashOnRead_Call) Run(run func(enabled bool)) *DAGStore_HashOnRead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(bool)) + }) + return _c +} + +func (_c *DAGStore_HashOnRead_Call) Return() *DAGStore_HashOnRead_Call { + _c.Call.Return() + return _c +} + +func (_c *DAGStore_HashOnRead_Call) RunAndReturn(run func(bool)) *DAGStore_HashOnRead_Call { + _c.Call.Return(run) + return _c +} + +// Put provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) Put(_a0 context.Context, _a1 blocks.Block) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, blocks.Block) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DAGStore_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put' +type DAGStore_Put_Call struct { + *mock.Call +} + +// Put is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 blocks.Block +func (_e *DAGStore_Expecter) Put(_a0 interface{}, _a1 interface{}) *DAGStore_Put_Call { + return &DAGStore_Put_Call{Call: _e.mock.On("Put", _a0, _a1)} +} + +func (_c *DAGStore_Put_Call) Run(run func(_a0 context.Context, _a1 blocks.Block)) *DAGStore_Put_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(blocks.Block)) + }) + return _c +} + +func (_c *DAGStore_Put_Call) Return(_a0 error) *DAGStore_Put_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_Put_Call) RunAndReturn(run func(context.Context, blocks.Block) error) *DAGStore_Put_Call { + _c.Call.Return(run) + return _c +} + +// PutMany provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) PutMany(_a0 context.Context, _a1 []blocks.Block) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []blocks.Block) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DAGStore_PutMany_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'PutMany' +type DAGStore_PutMany_Call struct { + *mock.Call +} + +// PutMany is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []blocks.Block +func (_e *DAGStore_Expecter) PutMany(_a0 interface{}, _a1 interface{}) *DAGStore_PutMany_Call { + return &DAGStore_PutMany_Call{Call: _e.mock.On("PutMany", _a0, _a1)} +} + +func (_c *DAGStore_PutMany_Call) Run(run func(_a0 context.Context, _a1 []blocks.Block)) *DAGStore_PutMany_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]blocks.Block)) + }) + return _c +} + +func (_c *DAGStore_PutMany_Call) Return(_a0 error) *DAGStore_PutMany_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_PutMany_Call) RunAndReturn(run func(context.Context, []blocks.Block) error) *DAGStore_PutMany_Call { + _c.Call.Return(run) + return _c +} + +// NewDAGStore creates a new instance of DAGStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDAGStore(t interface { + mock.TestingT + Cleanup(func()) +}) *DAGStore { + mock := &DAGStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/DSReaderWriter.go b/datastore/mocks/DSReaderWriter.go new file mode 100644 index 0000000000..3d822f6d2c --- /dev/null +++ b/datastore/mocks/DSReaderWriter.go @@ -0,0 +1,399 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + datastore "github.com/ipfs/go-datastore" + + iterable "github.com/sourcenetwork/defradb/datastore/iterable" + + mock "github.com/stretchr/testify/mock" + + query "github.com/ipfs/go-datastore/query" +) + +// DSReaderWriter is an autogenerated mock type for the DSReaderWriter type +type DSReaderWriter struct { + mock.Mock +} + +type DSReaderWriter_Expecter struct { + mock *mock.Mock +} + +func (_m *DSReaderWriter) EXPECT() *DSReaderWriter_Expecter { + return &DSReaderWriter_Expecter{mock: &_m.Mock} +} + +// Delete provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) Delete(ctx context.Context, key datastore.Key) error { + ret := _m.Called(ctx, key) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DSReaderWriter_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type DSReaderWriter_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) Delete(ctx interface{}, key interface{}) *DSReaderWriter_Delete_Call { + return &DSReaderWriter_Delete_Call{Call: _e.mock.On("Delete", ctx, key)} +} + +func (_c *DSReaderWriter_Delete_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_Delete_Call) Return(_a0 error) *DSReaderWriter_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DSReaderWriter_Delete_Call) RunAndReturn(run func(context.Context, datastore.Key) error) *DSReaderWriter_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) Get(ctx 
context.Context, key datastore.Key) ([]byte, error) { + ret := _m.Called(ctx, key) + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) []byte); ok { + r0 = rf(ctx, key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type DSReaderWriter_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) Get(ctx interface{}, key interface{}) *DSReaderWriter_Get_Call { + return &DSReaderWriter_Get_Call{Call: _e.mock.On("Get", ctx, key)} +} + +func (_c *DSReaderWriter_Get_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_Get_Call) Return(value []byte, err error) *DSReaderWriter_Get_Call { + _c.Call.Return(value, err) + return _c +} + +func (_c *DSReaderWriter_Get_Call) RunAndReturn(run func(context.Context, datastore.Key) ([]byte, error)) *DSReaderWriter_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetIterator provides a mock function with given fields: q +func (_m *DSReaderWriter) GetIterator(q query.Query) (iterable.Iterator, error) { + ret := _m.Called(q) + + var r0 iterable.Iterator + var r1 error + if rf, ok := ret.Get(0).(func(query.Query) (iterable.Iterator, error)); ok { + return rf(q) + } + if rf, ok := ret.Get(0).(func(query.Query) iterable.Iterator); ok { + r0 = rf(q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(iterable.Iterator) + } + } + + if rf, ok := ret.Get(1).(func(query.Query) error); ok { + r1 = rf(q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_GetIterator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetIterator' +type DSReaderWriter_GetIterator_Call struct { + *mock.Call +} + +// GetIterator is a helper method to define mock.On call +// - q query.Query +func (_e *DSReaderWriter_Expecter) GetIterator(q interface{}) *DSReaderWriter_GetIterator_Call { + return &DSReaderWriter_GetIterator_Call{Call: _e.mock.On("GetIterator", q)} +} + +func (_c *DSReaderWriter_GetIterator_Call) Run(run func(q query.Query)) *DSReaderWriter_GetIterator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(query.Query)) + }) + return _c +} + +func (_c *DSReaderWriter_GetIterator_Call) Return(_a0 iterable.Iterator, _a1 error) *DSReaderWriter_GetIterator_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DSReaderWriter_GetIterator_Call) RunAndReturn(run func(query.Query) (iterable.Iterator, error)) *DSReaderWriter_GetIterator_Call { + _c.Call.Return(run) + return _c +} + +// GetSize provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) GetSize(ctx context.Context, key datastore.Key) (int, error) { + ret := _m.Called(ctx, key) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, 
datastore.Key) int); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_GetSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSize' +type DSReaderWriter_GetSize_Call struct { + *mock.Call +} + +// GetSize is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) GetSize(ctx interface{}, key interface{}) *DSReaderWriter_GetSize_Call { + return &DSReaderWriter_GetSize_Call{Call: _e.mock.On("GetSize", ctx, key)} +} + +func (_c *DSReaderWriter_GetSize_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_GetSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_GetSize_Call) Return(size int, err error) *DSReaderWriter_GetSize_Call { + _c.Call.Return(size, err) + return _c +} + +func (_c *DSReaderWriter_GetSize_Call) RunAndReturn(run func(context.Context, datastore.Key) (int, error)) *DSReaderWriter_GetSize_Call { + _c.Call.Return(run) + return _c +} + +// Has provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) Has(ctx context.Context, key datastore.Key) (bool, error) { + ret := _m.Called(ctx, key) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) bool); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_Has_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Has' +type DSReaderWriter_Has_Call struct { + *mock.Call +} + +// Has is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) Has(ctx interface{}, key interface{}) *DSReaderWriter_Has_Call { + return &DSReaderWriter_Has_Call{Call: _e.mock.On("Has", ctx, key)} +} + +func (_c *DSReaderWriter_Has_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_Has_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_Has_Call) Return(exists bool, err error) *DSReaderWriter_Has_Call { + _c.Call.Return(exists, err) + return _c +} + +func (_c *DSReaderWriter_Has_Call) RunAndReturn(run func(context.Context, datastore.Key) (bool, error)) *DSReaderWriter_Has_Call { + _c.Call.Return(run) + return _c +} + +// Put provides a mock function with given fields: ctx, key, value +func (_m *DSReaderWriter) Put(ctx context.Context, key datastore.Key, value []byte) error { + ret := _m.Called(ctx, key, value) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok { + r0 = rf(ctx, key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DSReaderWriter_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put' +type DSReaderWriter_Put_Call struct { + *mock.Call +} + +// Put is a helper method to define mock.On call +// - ctx context.Context +// - key 
datastore.Key +// - value []byte +func (_e *DSReaderWriter_Expecter) Put(ctx interface{}, key interface{}, value interface{}) *DSReaderWriter_Put_Call { + return &DSReaderWriter_Put_Call{Call: _e.mock.On("Put", ctx, key, value)} +} + +func (_c *DSReaderWriter_Put_Call) Run(run func(ctx context.Context, key datastore.Key, value []byte)) *DSReaderWriter_Put_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key), args[2].([]byte)) + }) + return _c +} + +func (_c *DSReaderWriter_Put_Call) Return(_a0 error) *DSReaderWriter_Put_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DSReaderWriter_Put_Call) RunAndReturn(run func(context.Context, datastore.Key, []byte) error) *DSReaderWriter_Put_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: ctx, q +func (_m *DSReaderWriter) Query(ctx context.Context, q query.Query) (query.Results, error) { + ret := _m.Called(ctx, q) + + var r0 query.Results + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, query.Query) query.Results); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(query.Results) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, query.Query) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type DSReaderWriter_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +// - ctx context.Context +// - q query.Query +func (_e *DSReaderWriter_Expecter) Query(ctx interface{}, q interface{}) *DSReaderWriter_Query_Call { + return &DSReaderWriter_Query_Call{Call: _e.mock.On("Query", ctx, q)} +} + +func (_c *DSReaderWriter_Query_Call) Run(run func(ctx context.Context, q query.Query)) *DSReaderWriter_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(query.Query)) + }) + return _c +} + +func (_c *DSReaderWriter_Query_Call) Return(_a0 query.Results, _a1 error) *DSReaderWriter_Query_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DSReaderWriter_Query_Call) RunAndReturn(run func(context.Context, query.Query) (query.Results, error)) *DSReaderWriter_Query_Call { + _c.Call.Return(run) + return _c +} + +// NewDSReaderWriter creates a new instance of DSReaderWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDSReaderWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *DSReaderWriter { + mock := &DSReaderWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/Results.go b/datastore/mocks/Results.go new file mode 100644 index 0000000000..69e19a420e --- /dev/null +++ b/datastore/mocks/Results.go @@ -0,0 +1,309 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
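+//
+// A minimal usage sketch (an illustrative assumption, not produced by the
+// generator): tests can wire typed expectations through the expecter API,
+// for example:
+//
+//	res := mocks.NewResults(t)
+//	res.EXPECT().NextSync().Return(query.Result{}, false).Maybe()
+//	res.EXPECT().Close().Return(nil).Maybe()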
+ +package mocks + +import ( + goprocess "github.com/jbenet/goprocess" + mock "github.com/stretchr/testify/mock" + + query "github.com/ipfs/go-datastore/query" +) + +// Results is an autogenerated mock type for the Results type +type Results struct { + mock.Mock +} + +type Results_Expecter struct { + mock *mock.Mock +} + +func (_m *Results) EXPECT() *Results_Expecter { + return &Results_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *Results) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Results_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type Results_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *Results_Expecter) Close() *Results_Close_Call { + return &Results_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *Results_Close_Call) Run(run func()) *Results_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Close_Call) Return(_a0 error) *Results_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Close_Call) RunAndReturn(run func() error) *Results_Close_Call { + _c.Call.Return(run) + return _c +} + +// Next provides a mock function with given fields: +func (_m *Results) Next() <-chan query.Result { + ret := _m.Called() + + var r0 <-chan query.Result + if rf, ok := ret.Get(0).(func() <-chan query.Result); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan query.Result) + } + } + + return r0 +} + +// Results_Next_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Next' +type Results_Next_Call struct { + *mock.Call +} + +// Next is a helper method to define mock.On call +func (_e *Results_Expecter) Next() *Results_Next_Call { + return &Results_Next_Call{Call: _e.mock.On("Next")} +} + +func (_c *Results_Next_Call) Run(run func()) *Results_Next_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Next_Call) Return(_a0 <-chan query.Result) *Results_Next_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Next_Call) RunAndReturn(run func() <-chan query.Result) *Results_Next_Call { + _c.Call.Return(run) + return _c +} + +// NextSync provides a mock function with given fields: +func (_m *Results) NextSync() (query.Result, bool) { + ret := _m.Called() + + var r0 query.Result + var r1 bool + if rf, ok := ret.Get(0).(func() (query.Result, bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() query.Result); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(query.Result) + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Results_NextSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NextSync' +type Results_NextSync_Call struct { + *mock.Call +} + +// NextSync is a helper method to define mock.On call +func (_e *Results_Expecter) NextSync() *Results_NextSync_Call { + return &Results_NextSync_Call{Call: _e.mock.On("NextSync")} +} + +func (_c *Results_NextSync_Call) Run(run func()) *Results_NextSync_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_NextSync_Call) Return(_a0 query.Result, _a1 bool) *Results_NextSync_Call { + _c.Call.Return(_a0, _a1) 
+ return _c +} + +func (_c *Results_NextSync_Call) RunAndReturn(run func() (query.Result, bool)) *Results_NextSync_Call { + _c.Call.Return(run) + return _c +} + +// Process provides a mock function with given fields: +func (_m *Results) Process() goprocess.Process { + ret := _m.Called() + + var r0 goprocess.Process + if rf, ok := ret.Get(0).(func() goprocess.Process); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(goprocess.Process) + } + } + + return r0 +} + +// Results_Process_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Process' +type Results_Process_Call struct { + *mock.Call +} + +// Process is a helper method to define mock.On call +func (_e *Results_Expecter) Process() *Results_Process_Call { + return &Results_Process_Call{Call: _e.mock.On("Process")} +} + +func (_c *Results_Process_Call) Run(run func()) *Results_Process_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Process_Call) Return(_a0 goprocess.Process) *Results_Process_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Process_Call) RunAndReturn(run func() goprocess.Process) *Results_Process_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: +func (_m *Results) Query() query.Query { + ret := _m.Called() + + var r0 query.Query + if rf, ok := ret.Get(0).(func() query.Query); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(query.Query) + } + + return r0 +} + +// Results_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type Results_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +func (_e *Results_Expecter) Query() *Results_Query_Call { + return &Results_Query_Call{Call: _e.mock.On("Query")} +} + +func (_c *Results_Query_Call) Run(run func()) *Results_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Query_Call) Return(_a0 query.Query) *Results_Query_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Query_Call) RunAndReturn(run func() query.Query) *Results_Query_Call { + _c.Call.Return(run) + return _c +} + +// Rest provides a mock function with given fields: +func (_m *Results) Rest() ([]query.Entry, error) { + ret := _m.Called() + + var r0 []query.Entry + var r1 error + if rf, ok := ret.Get(0).(func() ([]query.Entry, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []query.Entry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]query.Entry) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Results_Rest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rest' +type Results_Rest_Call struct { + *mock.Call +} + +// Rest is a helper method to define mock.On call +func (_e *Results_Expecter) Rest() *Results_Rest_Call { + return &Results_Rest_Call{Call: _e.mock.On("Rest")} +} + +func (_c *Results_Rest_Call) Run(run func()) *Results_Rest_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Rest_Call) Return(_a0 []query.Entry, _a1 error) *Results_Rest_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Results_Rest_Call) RunAndReturn(run func() ([]query.Entry, error)) *Results_Rest_Call { + _c.Call.Return(run) + return _c +} + +// NewResults creates a new instance of 
Results. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewResults(t interface { + mock.TestingT + Cleanup(func()) +}) *Results { + mock := &Results{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/RootStore.go b/datastore/mocks/RootStore.go new file mode 100644 index 0000000000..96f9cb6256 --- /dev/null +++ b/datastore/mocks/RootStore.go @@ -0,0 +1,536 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + datastore "github.com/ipfs/go-datastore" + + mock "github.com/stretchr/testify/mock" + + query "github.com/ipfs/go-datastore/query" +) + +// RootStore is an autogenerated mock type for the RootStore type +type RootStore struct { + mock.Mock +} + +type RootStore_Expecter struct { + mock *mock.Mock +} + +func (_m *RootStore) EXPECT() *RootStore_Expecter { + return &RootStore_Expecter{mock: &_m.Mock} +} + +// Batch provides a mock function with given fields: ctx +func (_m *RootStore) Batch(ctx context.Context) (datastore.Batch, error) { + ret := _m.Called(ctx) + + var r0 datastore.Batch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (datastore.Batch, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) datastore.Batch); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.Batch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Batch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Batch' +type RootStore_Batch_Call struct { + *mock.Call +} + +// Batch is a helper method to define mock.On call +// - ctx context.Context +func (_e *RootStore_Expecter) Batch(ctx interface{}) *RootStore_Batch_Call { + return &RootStore_Batch_Call{Call: _e.mock.On("Batch", ctx)} +} + +func (_c *RootStore_Batch_Call) Run(run func(ctx context.Context)) *RootStore_Batch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *RootStore_Batch_Call) Return(_a0 datastore.Batch, _a1 error) *RootStore_Batch_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootStore_Batch_Call) RunAndReturn(run func(context.Context) (datastore.Batch, error)) *RootStore_Batch_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *RootStore) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type RootStore_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *RootStore_Expecter) Close() *RootStore_Close_Call { + return &RootStore_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *RootStore_Close_Call) Run(run func()) *RootStore_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *RootStore_Close_Call) Return(_a0 error) *RootStore_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Close_Call) RunAndReturn(run func() error) *RootStore_Close_Call { + _c.Call.Return(run) + return _c +} + +// Delete 
provides a mock function with given fields: ctx, key +func (_m *RootStore) Delete(ctx context.Context, key datastore.Key) error { + ret := _m.Called(ctx, key) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type RootStore_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) Delete(ctx interface{}, key interface{}) *RootStore_Delete_Call { + return &RootStore_Delete_Call{Call: _e.mock.On("Delete", ctx, key)} +} + +func (_c *RootStore_Delete_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Delete_Call) Return(_a0 error) *RootStore_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Delete_Call) RunAndReturn(run func(context.Context, datastore.Key) error) *RootStore_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, key +func (_m *RootStore) Get(ctx context.Context, key datastore.Key) ([]byte, error) { + ret := _m.Called(ctx, key) + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) []byte); ok { + r0 = rf(ctx, key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type RootStore_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) Get(ctx interface{}, key interface{}) *RootStore_Get_Call { + return &RootStore_Get_Call{Call: _e.mock.On("Get", ctx, key)} +} + +func (_c *RootStore_Get_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Get_Call) Return(value []byte, err error) *RootStore_Get_Call { + _c.Call.Return(value, err) + return _c +} + +func (_c *RootStore_Get_Call) RunAndReturn(run func(context.Context, datastore.Key) ([]byte, error)) *RootStore_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetSize provides a mock function with given fields: ctx, key +func (_m *RootStore) GetSize(ctx context.Context, key datastore.Key) (int, error) { + ret := _m.Called(ctx, key) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) int); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_GetSize_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'GetSize' +type RootStore_GetSize_Call struct { + *mock.Call +} + +// GetSize is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) GetSize(ctx interface{}, key interface{}) *RootStore_GetSize_Call { + return &RootStore_GetSize_Call{Call: _e.mock.On("GetSize", ctx, key)} +} + +func (_c *RootStore_GetSize_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_GetSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_GetSize_Call) Return(size int, err error) *RootStore_GetSize_Call { + _c.Call.Return(size, err) + return _c +} + +func (_c *RootStore_GetSize_Call) RunAndReturn(run func(context.Context, datastore.Key) (int, error)) *RootStore_GetSize_Call { + _c.Call.Return(run) + return _c +} + +// Has provides a mock function with given fields: ctx, key +func (_m *RootStore) Has(ctx context.Context, key datastore.Key) (bool, error) { + ret := _m.Called(ctx, key) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) bool); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Has_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Has' +type RootStore_Has_Call struct { + *mock.Call +} + +// Has is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) Has(ctx interface{}, key interface{}) *RootStore_Has_Call { + return &RootStore_Has_Call{Call: _e.mock.On("Has", ctx, key)} +} + +func (_c *RootStore_Has_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_Has_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Has_Call) Return(exists bool, err error) *RootStore_Has_Call { + _c.Call.Return(exists, err) + return _c +} + +func (_c *RootStore_Has_Call) RunAndReturn(run func(context.Context, datastore.Key) (bool, error)) *RootStore_Has_Call { + _c.Call.Return(run) + return _c +} + +// NewTransaction provides a mock function with given fields: ctx, readOnly +func (_m *RootStore) NewTransaction(ctx context.Context, readOnly bool) (datastore.Txn, error) { + ret := _m.Called(ctx, readOnly) + + var r0 datastore.Txn + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { + return rf(ctx, readOnly) + } + if rf, ok := ret.Get(0).(func(context.Context, bool) datastore.Txn); ok { + r0 = rf(ctx, readOnly) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.Txn) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(ctx, readOnly) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_NewTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTransaction' +type RootStore_NewTransaction_Call struct { + *mock.Call +} + +// NewTransaction is a helper method to define mock.On call +// - ctx context.Context +// - readOnly bool +func (_e *RootStore_Expecter) NewTransaction(ctx 
interface{}, readOnly interface{}) *RootStore_NewTransaction_Call { + return &RootStore_NewTransaction_Call{Call: _e.mock.On("NewTransaction", ctx, readOnly)} +} + +func (_c *RootStore_NewTransaction_Call) Run(run func(ctx context.Context, readOnly bool)) *RootStore_NewTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *RootStore_NewTransaction_Call) Return(_a0 datastore.Txn, _a1 error) *RootStore_NewTransaction_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootStore_NewTransaction_Call) RunAndReturn(run func(context.Context, bool) (datastore.Txn, error)) *RootStore_NewTransaction_Call { + _c.Call.Return(run) + return _c +} + +// Put provides a mock function with given fields: ctx, key, value +func (_m *RootStore) Put(ctx context.Context, key datastore.Key, value []byte) error { + ret := _m.Called(ctx, key, value) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok { + r0 = rf(ctx, key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put' +type RootStore_Put_Call struct { + *mock.Call +} + +// Put is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +// - value []byte +func (_e *RootStore_Expecter) Put(ctx interface{}, key interface{}, value interface{}) *RootStore_Put_Call { + return &RootStore_Put_Call{Call: _e.mock.On("Put", ctx, key, value)} +} + +func (_c *RootStore_Put_Call) Run(run func(ctx context.Context, key datastore.Key, value []byte)) *RootStore_Put_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key), args[2].([]byte)) + }) + return _c +} + +func (_c *RootStore_Put_Call) Return(_a0 error) *RootStore_Put_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Put_Call) RunAndReturn(run func(context.Context, datastore.Key, []byte) error) *RootStore_Put_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: ctx, q +func (_m *RootStore) Query(ctx context.Context, q query.Query) (query.Results, error) { + ret := _m.Called(ctx, q) + + var r0 query.Results + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, query.Query) query.Results); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(query.Results) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, query.Query) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type RootStore_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +// - ctx context.Context +// - q query.Query +func (_e *RootStore_Expecter) Query(ctx interface{}, q interface{}) *RootStore_Query_Call { + return &RootStore_Query_Call{Call: _e.mock.On("Query", ctx, q)} +} + +func (_c *RootStore_Query_Call) Run(run func(ctx context.Context, q query.Query)) *RootStore_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(query.Query)) + }) + return _c +} + +func (_c *RootStore_Query_Call) Return(_a0 query.Results, _a1 error) *RootStore_Query_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootStore_Query_Call) RunAndReturn(run func(context.Context, query.Query) (query.Results, error)) *RootStore_Query_Call { + _c.Call.Return(run) + return _c +} + +// Sync provides a mock function with given fields: ctx, prefix +func (_m *RootStore) Sync(ctx context.Context, prefix datastore.Key) error { + ret := _m.Called(ctx, prefix) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { + r0 = rf(ctx, prefix) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' +type RootStore_Sync_Call struct { + *mock.Call +} + +// Sync is a helper method to define mock.On call +// - ctx context.Context +// - prefix datastore.Key +func (_e *RootStore_Expecter) Sync(ctx interface{}, prefix interface{}) *RootStore_Sync_Call { + return &RootStore_Sync_Call{Call: _e.mock.On("Sync", ctx, prefix)} +} + +func (_c *RootStore_Sync_Call) Run(run func(ctx context.Context, prefix datastore.Key)) *RootStore_Sync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Sync_Call) Return(_a0 error) *RootStore_Sync_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Sync_Call) RunAndReturn(run func(context.Context, datastore.Key) error) *RootStore_Sync_Call { + _c.Call.Return(run) + return _c +} + +// NewRootStore creates a new instance of RootStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRootStore(t interface { + mock.TestingT + Cleanup(func()) +}) *RootStore { + mock := &RootStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/Txn.go b/datastore/mocks/Txn.go new file mode 100644 index 0000000000..2fe024a9ad --- /dev/null +++ b/datastore/mocks/Txn.go @@ -0,0 +1,393 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
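+//
+// A minimal usage sketch (an illustrative assumption, not produced by the
+// generator): tests can stub the multistore accessors and lifecycle calls,
+// for example (someStore is a hypothetical DSReaderWriter mock):
+//
+//	txn := mocks.NewTxn(t)
+//	txn.EXPECT().Datastore().Return(someStore).Maybe()
+//	txn.EXPECT().Commit(mock.Anything).Return(nil).Maybe()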
+ +package mocks + +import ( + context "context" + + datastore "github.com/sourcenetwork/defradb/datastore" + mock "github.com/stretchr/testify/mock" +) + +// Txn is an autogenerated mock type for the Txn type +type Txn struct { + mock.Mock +} + +type Txn_Expecter struct { + mock *mock.Mock +} + +func (_m *Txn) EXPECT() *Txn_Expecter { + return &Txn_Expecter{mock: &_m.Mock} +} + +// Commit provides a mock function with given fields: ctx +func (_m *Txn) Commit(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Txn_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' +type Txn_Commit_Call struct { + *mock.Call +} + +// Commit is a helper method to define mock.On call +// - ctx context.Context +func (_e *Txn_Expecter) Commit(ctx interface{}) *Txn_Commit_Call { + return &Txn_Commit_Call{Call: _e.mock.On("Commit", ctx)} +} + +func (_c *Txn_Commit_Call) Run(run func(ctx context.Context)) *Txn_Commit_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Txn_Commit_Call) Return(_a0 error) *Txn_Commit_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Commit_Call) RunAndReturn(run func(context.Context) error) *Txn_Commit_Call { + _c.Call.Return(run) + return _c +} + +// DAGstore provides a mock function with given fields: +func (_m *Txn) DAGstore() datastore.DAGStore { + ret := _m.Called() + + var r0 datastore.DAGStore + if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DAGStore) + } + } + + return r0 +} + +// Txn_DAGstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DAGstore' +type Txn_DAGstore_Call struct { + *mock.Call +} + +// DAGstore is a helper method to define mock.On call +func (_e *Txn_Expecter) DAGstore() *Txn_DAGstore_Call { + return &Txn_DAGstore_Call{Call: _e.mock.On("DAGstore")} +} + +func (_c *Txn_DAGstore_Call) Run(run func()) *Txn_DAGstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_DAGstore_Call) Return(_a0 datastore.DAGStore) *Txn_DAGstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_DAGstore_Call) RunAndReturn(run func() datastore.DAGStore) *Txn_DAGstore_Call { + _c.Call.Return(run) + return _c +} + +// Datastore provides a mock function with given fields: +func (_m *Txn) Datastore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Datastore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Datastore' +type Txn_Datastore_Call struct { + *mock.Call +} + +// Datastore is a helper method to define mock.On call +func (_e *Txn_Expecter) Datastore() *Txn_Datastore_Call { + return &Txn_Datastore_Call{Call: _e.mock.On("Datastore")} +} + +func (_c *Txn_Datastore_Call) Run(run func()) *Txn_Datastore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Datastore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Datastore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Datastore_Call) RunAndReturn(run 
func() datastore.DSReaderWriter) *Txn_Datastore_Call { + _c.Call.Return(run) + return _c +} + +// Discard provides a mock function with given fields: ctx +func (_m *Txn) Discard(ctx context.Context) { + _m.Called(ctx) +} + +// Txn_Discard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Discard' +type Txn_Discard_Call struct { + *mock.Call +} + +// Discard is a helper method to define mock.On call +// - ctx context.Context +func (_e *Txn_Expecter) Discard(ctx interface{}) *Txn_Discard_Call { + return &Txn_Discard_Call{Call: _e.mock.On("Discard", ctx)} +} + +func (_c *Txn_Discard_Call) Run(run func(ctx context.Context)) *Txn_Discard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Txn_Discard_Call) Return() *Txn_Discard_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_Discard_Call) RunAndReturn(run func(context.Context)) *Txn_Discard_Call { + _c.Call.Return(run) + return _c +} + +// Headstore provides a mock function with given fields: +func (_m *Txn) Headstore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Headstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Headstore' +type Txn_Headstore_Call struct { + *mock.Call +} + +// Headstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Headstore() *Txn_Headstore_Call { + return &Txn_Headstore_Call{Call: _e.mock.On("Headstore")} +} + +func (_c *Txn_Headstore_Call) Run(run func()) *Txn_Headstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Headstore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Headstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Headstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) *Txn_Headstore_Call { + _c.Call.Return(run) + return _c +} + +// OnError provides a mock function with given fields: fn +func (_m *Txn) OnError(fn func()) { + _m.Called(fn) +} + +// Txn_OnError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnError' +type Txn_OnError_Call struct { + *mock.Call +} + +// OnError is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnError(fn interface{}) *Txn_OnError_Call { + return &Txn_OnError_Call{Call: _e.mock.On("OnError", fn)} +} + +func (_c *Txn_OnError_Call) Run(run func(fn func())) *Txn_OnError_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnError_Call) Return() *Txn_OnError_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnError_Call) RunAndReturn(run func(func())) *Txn_OnError_Call { + _c.Call.Return(run) + return _c +} + +// OnSuccess provides a mock function with given fields: fn +func (_m *Txn) OnSuccess(fn func()) { + _m.Called(fn) +} + +// Txn_OnSuccess_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnSuccess' +type Txn_OnSuccess_Call struct { + *mock.Call +} + +// OnSuccess is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnSuccess(fn interface{}) *Txn_OnSuccess_Call { + return &Txn_OnSuccess_Call{Call: _e.mock.On("OnSuccess", fn)} +} + +func (_c *Txn_OnSuccess_Call) Run(run func(fn 
func())) *Txn_OnSuccess_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnSuccess_Call) Return() *Txn_OnSuccess_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnSuccess_Call) RunAndReturn(run func(func())) *Txn_OnSuccess_Call { + _c.Call.Return(run) + return _c +} + +// Rootstore provides a mock function with given fields: +func (_m *Txn) Rootstore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Rootstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rootstore' +type Txn_Rootstore_Call struct { + *mock.Call +} + +// Rootstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Rootstore() *Txn_Rootstore_Call { + return &Txn_Rootstore_Call{Call: _e.mock.On("Rootstore")} +} + +func (_c *Txn_Rootstore_Call) Run(run func()) *Txn_Rootstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Rootstore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Rootstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Rootstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) *Txn_Rootstore_Call { + _c.Call.Return(run) + return _c +} + +// Systemstore provides a mock function with given fields: +func (_m *Txn) Systemstore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Systemstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Systemstore' +type Txn_Systemstore_Call struct { + *mock.Call +} + +// Systemstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Systemstore() *Txn_Systemstore_Call { + return &Txn_Systemstore_Call{Call: _e.mock.On("Systemstore")} +} + +func (_c *Txn_Systemstore_Call) Run(run func()) *Txn_Systemstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Systemstore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Systemstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Systemstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) *Txn_Systemstore_Call { + _c.Call.Return(run) + return _c +} + +// NewTxn creates a new instance of Txn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxn(t interface { + mock.TestingT + Cleanup(func()) +}) *Txn { + mock := &Txn{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/utils.go b/datastore/mocks/utils.go new file mode 100644 index 0000000000..af91fc6d3a --- /dev/null +++ b/datastore/mocks/utils.go @@ -0,0 +1,116 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package mocks
+
+import (
+	"testing"
+
+	ds "github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/query"
+	"github.com/stretchr/testify/mock"
+)
+
+// MultiStoreTxn bundles a Txn mock with the store mocks its accessors return,
+// so tests can tune expectations on each store individually.
+type MultiStoreTxn struct {
+	*Txn
+	t               *testing.T
+	MockRootstore   *DSReaderWriter
+	MockDatastore   *DSReaderWriter
+	MockHeadstore   *DSReaderWriter
+	MockDAGstore    *DAGStore
+	MockSystemstore *DSReaderWriter
+}
+
+func prepareDataStore(t *testing.T) *DSReaderWriter {
+	dataStore := NewDSReaderWriter(t)
+	dataStore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, ds.ErrNotFound).Maybe()
+	dataStore.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+	dataStore.EXPECT().Has(mock.Anything, mock.Anything).Return(true, nil).Maybe()
+	return dataStore
+}
+
+func prepareRootStore(t *testing.T) *DSReaderWriter {
+	return NewDSReaderWriter(t)
+}
+
+func prepareHeadStore(t *testing.T) *DSReaderWriter {
+	headStore := NewDSReaderWriter(t)
+
+	headStore.EXPECT().Query(mock.Anything, mock.Anything).
+		Return(NewQueryResultsWithValues(t), nil).Maybe()
+
+	headStore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, ds.ErrNotFound).Maybe()
+	headStore.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+	headStore.EXPECT().Has(mock.Anything, mock.Anything).Return(false, nil).Maybe()
+	return headStore
+}
+
+func prepareSystemStore(t *testing.T) *DSReaderWriter {
+	systemStore := NewDSReaderWriter(t)
+	systemStore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, nil).Maybe()
+	return systemStore
+}
+
+func prepareDAGStore(t *testing.T) *DAGStore {
+	dagStore := NewDAGStore(t)
+	dagStore.EXPECT().Put(mock.Anything, mock.Anything).Return(nil).Maybe()
+	dagStore.EXPECT().Has(mock.Anything, mock.Anything).Return(false, nil).Maybe()
+	return dagStore
+}
+
+// NewTxnWithMultistore returns a Txn mock whose store accessors return
+// pre-configured store mocks with permissive default expectations.
+func NewTxnWithMultistore(t *testing.T) *MultiStoreTxn {
+	txn := NewTxn(t)
+	txn.EXPECT().OnSuccess(mock.Anything).Maybe()
+
+	result := &MultiStoreTxn{
+		Txn:             txn,
+		t:               t,
+		MockRootstore:   prepareRootStore(t),
+		MockDatastore:   prepareDataStore(t),
+		MockHeadstore:   prepareHeadStore(t),
+		MockDAGstore:    prepareDAGStore(t),
+		MockSystemstore: prepareSystemStore(t),
+	}
+
+	txn.EXPECT().Rootstore().Return(result.MockRootstore).Maybe()
+	txn.EXPECT().Datastore().Return(result.MockDatastore).Maybe()
+	txn.EXPECT().Headstore().Return(result.MockHeadstore).Maybe()
+	txn.EXPECT().DAGstore().Return(result.MockDAGstore).Maybe()
+	txn.EXPECT().Systemstore().Return(result.MockSystemstore).Maybe()
+
+	return result
+}
+
+// ClearSystemStore swaps in a fresh system store mock, discarding any
+// expectations previously set on the old one.
+func (txn *MultiStoreTxn) ClearSystemStore() *MultiStoreTxn {
+	txn.MockSystemstore = NewDSReaderWriter(txn.t)
+	txn.EXPECT().Systemstore().Unset()
+	txn.EXPECT().Systemstore().Return(txn.MockSystemstore).Maybe()
+	return txn
+}
+
+// NewQueryResultsWithValues returns a Results mock that yields one
+// query.Result per given value.
+func NewQueryResultsWithValues(t *testing.T, values ...[]byte) *Results {
+	results := make([]query.Result, len(values))
+	for i, value := range values {
+		results[i] = query.Result{Entry: query.Entry{Value: value}}
+	}
+	return NewQueryResultsWithResults(t, results...)
+}
+
+// NewQueryResultsWithResults returns a Results mock that yields the given
+// query results over a pre-filled, closed channel.
+func NewQueryResultsWithResults(t *testing.T, results ...query.Result) *Results {
+	queryResults := NewResults(t)
+	resultChan := make(chan query.Result, len(results))
+	for _, result := range results {
+		resultChan <- result
+	}
+	close(resultChan)
+	queryResults.EXPECT().Next().Return(resultChan).Maybe()
+	queryResults.EXPECT().Close().Return(nil).Maybe()
+	return queryResults
+}
diff --git a/db/backup.go b/db/backup.go
new file mode 100644
index 0000000000..89925a6c53
--- /dev/null
+++ b/db/backup.go
@@ -0,0 +1,392 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/datastore"
+)
+
+// basicImport reads a JSON backup file and recreates its documents,
+// collection by collection, within the given transaction.
+func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath string) (err error) {
+	f, err := os.Open(filepath)
+	if err != nil {
+		return NewErrOpenFile(err, filepath)
+	}
+	defer func() {
+		closeErr := f.Close()
+		if closeErr != nil {
+			err = NewErrCloseFile(closeErr, err)
+		}
+	}()
+
+	d := json.NewDecoder(bufio.NewReader(f))
+
+	t, err := d.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return ErrExpectedJSONObject
+	}
+	for d.More() {
+		t, err := d.Token()
+		if err != nil {
+			return err
+		}
+		colName := t.(string)
+		col, err := db.getCollectionByName(ctx, txn, colName)
+		if err != nil {
+			return NewErrFailedToGetCollection(colName, err)
+		}
+
+		t, err = d.Token()
+		if err != nil {
+			return err
+		}
+		if t != json.Delim('[') {
+			return ErrExpectedJSONArray
+		}
+
+		for d.More() {
+			docMap := map[string]any{}
+			err = d.Decode(&docMap)
+			if err != nil {
+				return NewErrJSONDecode(err)
+			}
+
+			// Check for self-referencing fields and remove them from docMap
+			// so they do not participate in key creation.
+			resetMap := map[string]any{}
+			for _, field := range col.Schema().Fields {
+				if field.Kind == client.FieldKind_FOREIGN_OBJECT {
+					if val, ok := docMap[field.Name+request.RelatedObjectID]; ok {
+						if docMap["_newKey"] == val {
+							resetMap[field.Name+request.RelatedObjectID] = val
+							delete(docMap, field.Name+request.RelatedObjectID)
+						}
+					}
+				}
+			}
+
+			delete(docMap, "_key")
+			delete(docMap, "_newKey")
+
+			doc, err := client.NewDocFromMap(docMap)
+			if err != nil {
+				return NewErrDocFromMap(err)
+			}
+
+			err = col.WithTxn(txn).Create(ctx, doc)
+			if err != nil {
+				return NewErrDocCreate(err)
+			}
+
+			// add back the self referencing fields and update doc.
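+			// The document's key is derived from its content, so a field that
+			// references the document's own (new) key could not be set before
+			// Create; now that the key is known, restore each removed
+			// self-reference and persist it with an Update.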
+			for k, v := range resetMap {
+				err := doc.Set(k, v)
+				if err != nil {
+					return NewErrDocUpdate(err)
+				}
+				err = col.WithTxn(txn).Update(ctx, doc)
+				if err != nil {
+					return NewErrDocUpdate(err)
+				}
+			}
+		}
+		_, err = d.Token()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// basicExport writes the requested collections (all collections, if none are
+// named in the config) to the configured filepath as a single JSON object.
+// It writes to a temporary file first and renames it into place on success.
+func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client.BackupConfig) (err error) {
+	// Maps old document keys to the new keys documents will have after import.
+	keyChangeCache := map[string]string{}
+
+	cols := []client.Collection{}
+	if len(config.Collections) == 0 {
+		cols, err = db.getAllCollections(ctx, txn)
+		if err != nil {
+			return NewErrFailedToGetAllCollections(err)
+		}
+	} else {
+		for _, colName := range config.Collections {
+			col, err := db.getCollectionByName(ctx, txn, colName)
+			if err != nil {
+				return NewErrFailedToGetCollection(colName, err)
+			}
+			cols = append(cols, col)
+		}
+	}
+	colNameCache := map[string]struct{}{}
+	for _, col := range cols {
+		colNameCache[col.Name()] = struct{}{}
+	}
+
+	tempFile := config.Filepath + ".temp"
+	f, err := os.Create(tempFile)
+	if err != nil {
+		return NewErrCreateFile(err, tempFile)
+	}
+	defer func() {
+		closeErr := f.Close()
+		if closeErr != nil {
+			err = NewErrCloseFile(closeErr, err)
+		} else if err != nil {
+			// ensure we clean up if there was an error
+			removeErr := os.Remove(tempFile)
+			if removeErr != nil {
+				err = NewErrRemoveFile(removeErr, err, tempFile)
+			}
+		} else {
+			_ = os.Rename(tempFile, config.Filepath)
+		}
+	}()
+
+	// open the object
+	err = writeString(f, "{", "{\n", config.Pretty)
+	if err != nil {
+		return err
+	}
+
+	firstCol := true
+	for _, col := range cols {
+		if firstCol {
+			firstCol = false
+		} else {
+			// add collection separator
+			err = writeString(f, ",", ",\n", config.Pretty)
+			if err != nil {
+				return err
+			}
+		}
+
+		// write the collection name and open its array
+		err = writeString(
+			f,
+			fmt.Sprintf("\"%s\":[", col.Name()),
+			fmt.Sprintf(" \"%s\": [\n", col.Name()),
+			config.Pretty,
+		)
+		if err != nil {
+			return err
+		}
+		colTxn := col.WithTxn(txn)
+		keysCh, err := colTxn.GetAllDocKeys(ctx)
+		if err != nil {
+			return err
+		}
+
+		firstDoc := true
+		for key := range keysCh {
+			if firstDoc {
+				firstDoc = false
+			} else {
+				// add document separator
+				err = writeString(f, ",", ",\n", config.Pretty)
+				if err != nil {
+					return err
+				}
+			}
+			doc, err := colTxn.Get(ctx, key.Key, false)
+			if err != nil {
+				return err
+			}
+
+			isSelfReference := false
+			refFieldName := ""
+			// replace any foreign key if it needs to be changed
+			for _, field := range col.Schema().Fields {
+				switch field.Kind {
+				case client.FieldKind_FOREIGN_OBJECT:
+					if _, ok := colNameCache[field.Schema]; !ok {
+						continue
+					}
+					if foreignKey, err := doc.Get(field.Name + request.RelatedObjectID); err == nil {
+						if newKey, ok := keyChangeCache[foreignKey.(string)]; ok {
+							err := doc.Set(field.Name+request.RelatedObjectID, newKey)
+							if err != nil {
+								return err
+							}
+							if foreignKey.(string) == doc.Key().String() {
+								isSelfReference = true
+								refFieldName = field.Name + request.RelatedObjectID
+							}
+						} else {
+							foreignCol, err := db.getCollectionByName(ctx, txn, field.Schema)
+							if err != nil {
+								return NewErrFailedToGetCollection(field.Schema, err)
+							}
+							foreignDocKey, err := client.NewDocKeyFromString(foreignKey.(string))
+							if err != nil {
+								return err
+							}
+							foreignDoc, err := foreignCol.Get(ctx, foreignDocKey, false)
+							if err != nil {
+								err := doc.Set(field.Name+request.RelatedObjectID, nil)
+								if err != nil {
+									return err
+								}
+							} else {
+								oldForeignDoc, err := foreignDoc.ToMap()
+								if err != nil {
+									return err
+								}
+
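+								// Re-derive the foreign document's key from its
+								// exported content so the backup records the key
+								// the document will receive when imported again.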
+ // Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. + ensureIntIsInt(foreignCol.Schema().Fields, oldForeignDoc) + + delete(oldForeignDoc, "_key") + if foreignDoc.Key().String() == foreignDocKey.String() { + delete(oldForeignDoc, field.Name+request.RelatedObjectID) + } + + if foreignDoc.Key().String() == doc.Key().String() { + isSelfReference = true + refFieldName = field.Name + request.RelatedObjectID + } + + newForeignDoc, err := client.NewDocFromMap(oldForeignDoc) + if err != nil { + return err + } + + if foreignDoc.Key().String() != doc.Key().String() { + err = doc.Set(field.Name+request.RelatedObjectID, newForeignDoc.Key().String()) + if err != nil { + return err + } + } + + if newForeignDoc.Key().String() != foreignDoc.Key().String() { + keyChangeCache[foreignDoc.Key().String()] = newForeignDoc.Key().String() + } + } + } + } + } + } + + docM, err := doc.ToMap() + if err != nil { + return err + } + + // Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. + ensureIntIsInt(col.Schema().Fields, docM) + + delete(docM, "_key") + if isSelfReference { + delete(docM, refFieldName) + } + + newDoc, err := client.NewDocFromMap(docM) + if err != nil { + return err + } + // newKey is needed to let the user know what will be the key of the imported document. + docM["_newKey"] = newDoc.Key().String() + // NewDocFromMap removes the "_key" map item so we add it back. + docM["_key"] = doc.Key().String() + + if isSelfReference { + docM[refFieldName] = newDoc.Key().String() + } + + if newDoc.Key().String() != doc.Key().String() { + keyChangeCache[doc.Key().String()] = newDoc.Key().String() + } + + var b []byte + if config.Pretty { + _, err = f.WriteString(" ") + if err != nil { + return NewErrFailedToWriteString(err) + } + b, err = json.MarshalIndent(docM, " ", " ") + if err != nil { + return NewErrFailedToWriteString(err) + } + } else { + b, err = json.Marshal(docM) + if err != nil { + return err + } + } + + // write document + _, err = f.Write(b) + if err != nil { + return err + } + } + + // close collection + err = writeString(f, "]", "\n ]", config.Pretty) + if err != nil { + return err + } + } + + // close object + err = writeString(f, "}", "\n}", config.Pretty) + if err != nil { + return err + } + + err = f.Sync() + if err != nil { + return err + } + + return nil +} + +func writeString(f *os.File, normal, pretty string, isPretty bool) error { + if isPretty { + _, err := f.WriteString(pretty) + if err != nil { + return NewErrFailedToWriteString(err) + } + return nil + } + + _, err := f.WriteString(normal) + if err != nil { + return NewErrFailedToWriteString(err) + } + return nil +} + +// Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. +func ensureIntIsInt(fields []client.FieldDescription, docMap map[string]any) { + for _, field := range fields { + if field.Kind == client.FieldKind_INT { + if val, ok := docMap[field.Name]; ok { + switch v := val.(type) { + case uint64: + docMap[field.Name] = int(v) + case int64: + docMap[field.Name] = int(v) + } + } + } + } +} diff --git a/db/backup_test.go b/db/backup_test.go new file mode 100644 index 0000000000..2f89f54a07 --- /dev/null +++ b/db/backup_test.go @@ -0,0 +1,552 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" +) + +func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, 
&client.BackupConfig{Filepath: filepath, Pretty: true}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_WithSingleCollection_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + book: [Book] + } + + type Book { + name: String + author: User + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 31}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`)) + require.NoError(t, err) + + doc4, err := 
client.NewDocFromJSON([]byte(`{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Book") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + err = col2.Create(ctx, doc4) + require.NoError(t, err) + + err = doc1.Set("age", 31) + require.NoError(t, err) + + err = col1.Update(ctx, doc1) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Book":[{"_key":"bae-4399f189-138d-5d49-9e25-82e78463677b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = 
json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.NoError(t, err) + err = txn.Commit(ctx) + require.NoError(t, err) + + txn, err = db.NewTxn(ctx, true) + require.NoError(t, err) + + col1, err := db.getCollectionByName(ctx, txn, "Address") + require.NoError(t, err) + + key1, err := client.NewDocKeyFromString("bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f") + require.NoError(t, err) + _, err = col1.Get(ctx, key1, false) + require.NoError(t, err) + + col2, err := db.getCollectionByName(ctx, txn, "User") + require.NoError(t, err) + + key2, err := client.NewDocKeyFromString("bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df") + require.NoError(t, err) + _, err = col2.Get(ctx, key2, false) + require.NoError(t, err) + + key3, err := client.NewDocKeyFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") + require.NoError(t, err) + _, err = col2.Get(ctx, key3, false) + require.NoError(t, err) +} + +func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`["Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]]`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.ErrorIs(t, err, ErrExpectedJSONObject) + err = txn.Commit(ctx) + require.NoError(t, err) +} + +func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, 
`type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.ErrorIs(t, err, ErrExpectedJSONArray) + err = txn.Commit(ctx) + require.NoError(t, err) +} + +func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + 0664, + ) + require.NoError(t, err) + + wrongFilepath := t.TempDir() + "/some/test.json" + err = db.basicImport(ctx, txn, wrongFilepath) + require.ErrorIs(t, err, os.ErrNotExist) + err = txn.Commit(ctx) + require.NoError(t, err) +} + +func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Addresses":{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.ErrorIs(t, err, ErrFailedToGetCollection) + err = txn.Commit(ctx) + require.NoError(t, err) +} diff --git a/db/collection.go b/db/collection.go index a9131037c9..3430684697 100644 --- a/db/collection.go +++ b/db/collection.go @@ -22,16 +22,18 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" + ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/lens" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/merkle/crdt" ) @@ -56,6 +58,9 @@ type collection struct { schemaID string desc client.CollectionDescription + + indexes []CollectionIndex + fetcherFactory func() fetcher.Fetcher } // @todo: Move the base Descriptions to an internal API within the db/ package. 
@@ -94,12 +99,31 @@ func (db *db) newCollection(desc client.CollectionDescription) (*collection, err } return &collection{ - db: db, - desc: desc, + db: db, + desc: client.CollectionDescription{ + ID: desc.ID, + Name: desc.Name, + Schema: desc.Schema, + }, colID: desc.ID, }, nil } +// newFetcher returns a new fetcher instance for this collection. +// If a fetcherFactory is set, it will be used to create the fetcher. +// It's a very simple factory, but it allows us to inject a mock fetcher +// for testing. +func (c *collection) newFetcher() fetcher.Fetcher { + var innerFetcher fetcher.Fetcher + if c.fetcherFactory != nil { + innerFetcher = c.fetcherFactory() + } else { + innerFetcher = new(fetcher.DocumentFetcher) + } + + return lens.NewFetcher(innerFetcher, c.db.LensRegistry()) +} + // createCollection creates a collection and saves it to the database in its system store. // Note: Collection.ID is an autoincrementing value that is generated by the database. func (db *db) createCollection( @@ -142,7 +166,7 @@ func (db *db) createCollection( } // add a reference to this DB by desc hash - cid, err := core.NewSHA256CidV1(globalSchemaBuf) + cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) if err != nil { return nil, err } @@ -186,6 +210,12 @@ func (db *db) createCollection( logging.NewKV("Name", col.Name()), logging.NewKV("SchemaID", col.SchemaID()), ) + + for _, index := range desc.Indexes { + if _, err := col.createIndex(ctx, txn, index); err != nil { + return nil, err + } + } return col, nil } @@ -232,10 +262,11 @@ func (db *db) updateCollection( return nil, err } - cid, err := core.NewSHA256CidV1(globalSchemaBuf) + cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) if err != nil { return nil, err } + previousSchemaVersionID := desc.Schema.VersionID schemaVersionID := cid.String() desc.Schema.VersionID = schemaVersionID @@ -264,6 +295,12 @@ func (db *db) updateCollection( return nil, err } + schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Schema.SchemaID, previousSchemaVersionID) + err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(schemaVersionID)) + if err != nil { + return nil, err + } + return db.getCollectionByName(ctx, txn, desc.Name) } @@ -276,7 +313,6 @@ func (db *db) validateUpdateCollection( txn datastore.Txn, proposedDesc client.CollectionDescription, ) (bool, error) { - var hasChanged bool existingCollection, err := db.getCollectionByName(ctx, txn, proposedDesc.Name) if err != nil { if errors.Is(err, ds.ErrNotFound) { @@ -310,6 +346,20 @@ func (db *db) validateUpdateCollection( return false, ErrCannotSetVersionID } + hasChangedFields, err := validateUpdateCollectionFields(existingDesc, proposedDesc) + if err != nil { + return hasChangedFields, err + } + + hasChangedIndexes, err := validateUpdateCollectionIndexes(existingDesc.Indexes, proposedDesc.Indexes) + return hasChangedFields || hasChangedIndexes, err +} + +func validateUpdateCollectionFields( + existingDesc client.CollectionDescription, + proposedDesc client.CollectionDescription, +) (bool, error) { + hasChanged := false existingFieldsByID := map[client.FieldID]client.FieldDescription{} existingFieldIndexesByName := map[string]int{} for i, field := range existingDesc.Schema.Fields { @@ -365,10 +415,40 @@ func (db *db) validateUpdateCollection( return false, NewErrCannotDeleteField(field.Name, field.ID) } } - return hasChanged, nil } +func validateUpdateCollectionIndexes( + existingIndexes []client.IndexDescription, + proposedIndexes []client.IndexDescription, +) (bool, error) { + 
existingNameToIndex := map[string]client.IndexDescription{} + for _, index := range existingIndexes { + existingNameToIndex[index.Name] = index + } + for _, proposedIndex := range proposedIndexes { + if existingIndex, exists := existingNameToIndex[proposedIndex.Name]; exists { + if len(existingIndex.Fields) != len(proposedIndex.Fields) { + return false, ErrCanNotChangeIndexWithPatch + } + for i := range existingIndex.Fields { + if existingIndex.Fields[i] != proposedIndex.Fields[i] { + return false, ErrCanNotChangeIndexWithPatch + } + } + delete(existingNameToIndex, proposedIndex.Name) + } else { + return false, NewErrCannotAddIndexWithPatch(proposedIndex.Name) + } + } + if len(existingNameToIndex) > 0 { + for _, index := range existingNameToIndex { + return false, NewErrCannotDropIndexWithPatch(index.Name) + } + } + return false, nil +} + // getCollectionByVersionId returns the [*collection] at the given [schemaVersionId] version. // // Will return an error if the given key is empty, or not found. @@ -378,7 +458,7 @@ func (db *db) getCollectionByVersionID( schemaVersionId string, ) (*collection, error) { if schemaVersionId == "" { - return nil, ErrSchemaVersionIdEmpty + return nil, ErrSchemaVersionIDEmpty } key := core.NewCollectionSchemaVersionKey(schemaVersionId) @@ -393,12 +473,19 @@ func (db *db) getCollectionByVersionID( return nil, err } - return &collection{ + col := &collection{ db: db, desc: desc, colID: desc.ID, schemaID: desc.Schema.SchemaID, - }, nil + } + + err = col.loadIndexes(ctx, txn) + if err != nil { + return nil, err + } + + return col, nil } // getCollectionByName returns an existing collection within the database. @@ -424,7 +511,7 @@ func (db *db) getCollectionBySchemaID( schemaID string, ) (client.Collection, error) { if schemaID == "" { - return nil, ErrSchemaIdEmpty + return nil, ErrSchemaIDEmpty } key := core.NewCollectionSchemaKey(schemaID) @@ -569,11 +656,13 @@ func (c *collection) SchemaID() string { // handle instead of a raw DB handle. 
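// A short usage sketch (editor's illustration; `col` and `doc` are
// hypothetical values obtained elsewhere):
//
//  txn, _ := db.NewTxn(ctx, false)
//  defer txn.Discard(ctx)
//  err := col.WithTxn(txn).Create(ctx, doc)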
func (c *collection) WithTxn(txn datastore.Txn) client.Collection { return &collection{ - db: c.db, - txn: immutable.Some(txn), - desc: c.desc, - colID: c.colID, - schemaID: c.schemaID, + db: c.db, + txn: immutable.Some(txn), + desc: c.desc, + colID: c.colID, + schemaID: c.schemaID, + indexes: c.indexes, + fetcherFactory: c.fetcherFactory, } } @@ -614,34 +703,25 @@ func (c *collection) CreateMany(ctx context.Context, docs []*client.Document) er func (c *collection) getKeysFromDoc( doc *client.Document, ) (client.DocKey, core.PrimaryDataStoreKey, error) { - // DocKey verification - buf, err := doc.Bytes() - if err != nil { - return client.DocKey{}, core.PrimaryDataStoreKey{}, err - } - // @todo: grab the cid Prefix from the DocKey internal CID if available - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - // And then feed it some data - doccid, err := pref.Sum(buf) + docKey, err := doc.GenerateDocKey() if err != nil { return client.DocKey{}, core.PrimaryDataStoreKey{}, err } - dockey := client.NewDocKeyV0(doccid) - primaryKey := c.getPrimaryKeyFromDocKey(dockey) + primaryKey := c.getPrimaryKeyFromDocKey(docKey) if primaryKey.DocKey != doc.Key().String() { return client.DocKey{}, core.PrimaryDataStoreKey{}, NewErrDocVerification(doc.Key().String(), primaryKey.DocKey) } - return dockey, primaryKey, nil + return docKey, primaryKey, nil } func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.Document) error { + // This has to be done before dockey verification happens in the next step. + if err := doc.RemapAliasFieldsAndDockey(c.desc.Schema.Fields); err != nil { + return err + } + dockey, primaryKey, err := c.getKeysFromDoc(doc) if err != nil { return err @@ -653,10 +733,10 @@ func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client. return err } if exists { - return ErrDocumentAlreadyExists + return NewErrDocumentAlreadyExists(primaryKey.DocKey) } if isDeleted { - return ErrDocumentDeleted + return NewErrDocumentDeleted(primaryKey.DocKey) } // write value object marker if we have an empty doc @@ -674,7 +754,7 @@ func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client. return err } - return err + return c.indexNewDoc(ctx, txn, doc) } // Update an existing document with the new values. @@ -696,7 +776,7 @@ func (c *collection) Update(ctx context.Context, doc *client.Document) error { return client.ErrDocumentNotFound } if isDeleted { - return ErrDocumentDeleted + return NewErrDocumentDeleted(primaryKey.DocKey) } err = c.update(ctx, txn, doc) @@ -755,6 +835,12 @@ func (c *collection) save( doc *client.Document, isCreate bool, ) (cid.Cid, error) { + if !isCreate { + err := c.updateIndexedDoc(ctx, txn, doc) + if err != nil { + return cid.Undef, err + } + } // NOTE: We delay the final Clean() call until we know // the commit on the transaction is successful. 
If we didn't // wait, and just did it here, then *if* the commit fails down @@ -780,11 +866,12 @@ func (c *collection) save( if val.IsDirty() { fieldKey, fieldExists := c.tryGetFieldKey(primaryKey, k) + if !fieldExists { return cid.Undef, client.NewErrFieldNotExist(k) } - fieldDescription, valid := c.desc.GetField(k) + fieldDescription, valid := c.desc.Schema.GetField(k) if !valid { return cid.Undef, client.NewErrFieldNotExist(k) } @@ -886,7 +973,7 @@ func (c *collection) Delete(ctx context.Context, key client.DocKey) (bool, error return false, client.ErrDocumentNotFound } if isDeleted { - return false, ErrDocumentDeleted + return false, NewErrDocumentDeleted(primaryKey.DocKey) } err = c.applyDelete(ctx, txn, primaryKey) @@ -967,7 +1054,11 @@ func (c *collection) saveValueToMerkleCRDT( args ...any) (ipld.Node, uint64, error) { switch ctype { case client.LWW_REGISTER: - field, _ := c.Description().GetFieldByID(key.FieldId) + fieldID, err := strconv.Atoi(key.FieldId) + if err != nil { + return nil, 0, err + } + field, _ := c.Description().GetFieldByID(client.FieldID(fieldID)) merkleCRDT, err := c.db.crdtFactory.InstanceWithStores( txn, core.NewCollectionSchemaVersionKey(c.Schema().VersionID), @@ -1110,5 +1201,6 @@ func (c *collection) tryGetSchemaFieldID(fieldName string) (uint32, bool) { return uint32(field.ID), true } } + return uint32(0), false } diff --git a/db/collection_delete.go b/db/collection_delete.go index acbdb26404..480656849f 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -239,7 +239,7 @@ func (c *collection) applyDelete( return client.ErrDocumentNotFound } if isDeleted { - return ErrDocumentDeleted + return NewErrDocumentDeleted(key.DocKey) } dsKey := key.ToDataStoreKey() diff --git a/db/collection_get.go b/db/collection_get.go index 678a154598..17e113231e 100644 --- a/db/collection_get.go +++ b/db/collection_get.go @@ -17,7 +17,6 @@ import ( "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/fetcher" ) func (c *collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) { @@ -37,7 +36,7 @@ func (c *collection) Get(ctx context.Context, key client.DocKey, showDeleted boo return nil, client.ErrDocumentNotFound } - doc, err := c.get(ctx, txn, dsKey, showDeleted) + doc, err := c.get(ctx, txn, dsKey, nil, showDeleted) if err != nil { return nil, err } @@ -48,13 +47,14 @@ func (c *collection) get( ctx context.Context, txn datastore.Txn, key core.PrimaryDataStoreKey, + fields []client.FieldDescription, showDeleted bool, ) (*client.Document, error) { // create a new document fetcher - df := new(fetcher.DocumentFetcher) + df := c.newFetcher() desc := &c.desc // initialize it with the primary index - err := df.Init(&c.desc, nil, false, showDeleted) + err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, showDeleted) if err != nil { _ = df.Close() return nil, err @@ -63,14 +63,14 @@ func (c *collection) get( // construct target key for DocKey targetKey := base.MakeDocKey(*desc, key.DocKey) // run the doc fetcher - err = df.Start(ctx, txn, core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd()))) + err = df.Start(ctx, core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd()))) if err != nil { _ = df.Close() return nil, err } // return first matched decoded doc - doc, err := df.FetchNextDecoded(ctx) + doc, _, err := df.FetchNextDecoded(ctx) if err != nil { _ = df.Close() return nil, err diff --git 
a/db/collection_index.go b/db/collection_index.go new file mode 100644 index 0000000000..a3a45ee7d4 --- /dev/null +++ b/db/collection_index.go @@ -0,0 +1,542 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/request/graphql/schema" +) + +// createCollectionIndex creates a new collection index and saves it to the database in its system store. +func (db *db) createCollectionIndex( + ctx context.Context, + txn datastore.Txn, + collectionName string, + desc client.IndexDescription, +) (client.IndexDescription, error) { + col, err := db.getCollectionByName(ctx, txn, collectionName) + if err != nil { + return client.IndexDescription{}, NewErrCanNotReadCollection(collectionName, err) + } + col = col.WithTxn(txn) + return col.CreateIndex(ctx, desc) +} + +func (db *db) dropCollectionIndex( + ctx context.Context, + txn datastore.Txn, + collectionName, indexName string, +) error { + col, err := db.getCollectionByName(ctx, txn, collectionName) + if err != nil { + return NewErrCanNotReadCollection(collectionName, err) + } + col = col.WithTxn(txn) + return col.DropIndex(ctx, indexName) +} + +// getAllIndexes returns all the indexes in the database. 
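+//
+// A short usage sketch (editor's illustration, not part of the original patch):
+//
+//  indexesByCol, err := db.getAllIndexes(ctx, txn)
+//  if err == nil {
+//  for colName, colIndexes := range indexesByCol {
+//  fmt.Printf("%s: %d index(es)\n", colName, len(colIndexes))
+//  }
+//  }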
+func (db *db) getAllIndexes(
+ ctx context.Context,
+ txn datastore.Txn,
+) (map[client.CollectionName][]client.IndexDescription, error) {
+ prefix := core.NewCollectionIndexKey("", "")
+
+ deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx,
+ prefix.ToString(), txn.Systemstore())
+
+ if err != nil {
+ return nil, err
+ }
+
+ indexes := make(map[client.CollectionName][]client.IndexDescription)
+
+ for _, indexRec := range deserializedIndexes {
+ indexKey, err := core.NewCollectionIndexKeyFromString(indexRec.key)
+ if err != nil {
+ return nil, NewErrInvalidStoredIndexKey(indexRec.key)
+ }
+ indexes[indexKey.CollectionName] = append(indexes[indexKey.CollectionName], indexRec.element)
+ }
+
+ return indexes, nil
+}
+
+func (db *db) fetchCollectionIndexDescriptions(
+ ctx context.Context,
+ txn datastore.Txn,
+ colName string,
+) ([]client.IndexDescription, error) {
+ prefix := core.NewCollectionIndexKey(colName, "")
+ deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx,
+ prefix.ToString(), txn.Systemstore())
+ if err != nil {
+ return nil, err
+ }
+ indexes := make([]client.IndexDescription, 0, len(deserializedIndexes))
+ for _, indexRec := range deserializedIndexes {
+ indexes = append(indexes, indexRec.element)
+ }
+ return indexes, nil
+}
+
+func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *client.Document) error {
+ err := c.loadIndexes(ctx, txn)
+ if err != nil {
+ return err
+ }
+ for _, index := range c.indexes {
+ err = index.Save(ctx, txn, doc)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// collectIndexedFields returns all fields that are indexed by any of the collection's indexes.
+func (c *collection) collectIndexedFields() []client.FieldDescription {
+ fieldsMap := make(map[string]client.FieldDescription)
+ for _, index := range c.indexes {
+ for _, field := range index.Description().Fields {
+ for i := range c.desc.Schema.Fields {
+ colField := c.desc.Schema.Fields[i]
+ if field.Name == colField.Name {
+ fieldsMap[field.Name] = colField
+ break
+ }
+ }
+ }
+ }
+ fields := make([]client.FieldDescription, 0, len(fieldsMap))
+ for _, field := range fieldsMap {
+ fields = append(fields, field)
+ }
+ return fields
+}
+
+func (c *collection) updateIndexedDoc(
+ ctx context.Context,
+ txn datastore.Txn,
+ doc *client.Document,
+) error {
+ err := c.loadIndexes(ctx, txn)
+ if err != nil {
+ return err
+ }
+ oldDoc, err := c.get(ctx, txn, c.getPrimaryKeyFromDocKey(doc.Key()), c.collectIndexedFields(), false)
+ if err != nil {
+ return err
+ }
+ for _, index := range c.indexes {
+ err = index.Update(ctx, txn, oldDoc, doc)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CreateIndex creates a new index on the collection.
+//
+// If the index name is empty, a name will be automatically generated.
+// Otherwise its uniqueness will be checked against existing indexes and
+// it will be validated with the `schema.IsValidIndexName` method.
+//
+// The provided index description must include at least one field with
+// a name that exists in the collection schema.
+// Also its `ID` field must be zero. It will be assigned a unique
+// incremental value by the database.
+//
+// The index description will be stored in the system store.
+//
+// Once finished, if there are existing documents in the collection,
+// the documents will be indexed by the new index.
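+//
+// A minimal usage sketch (editor's illustration; the collection variable and
+// field name are assumptions, not part of this patch):
+//
+//  desc, err := col.CreateIndex(ctx, client.IndexDescription{
+//  Fields: []client.IndexedFieldDescription{{Name: "name"}},
+//  })
+//  // On success, desc.Name holds the generated name (e.g. "User_name_ASC")
+//  // and desc.ID holds the assigned index ID.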
+func (c *collection) CreateIndex(
+ ctx context.Context,
+ desc client.IndexDescription,
+) (client.IndexDescription, error) {
+ txn, err := c.getTxn(ctx, false)
+ if err != nil {
+ return client.IndexDescription{}, err
+ }
+ defer c.discardImplicitTxn(ctx, txn)
+
+ index, err := c.createIndex(ctx, txn, desc)
+ if err != nil {
+ return client.IndexDescription{}, err
+ }
+ return index.Description(), c.commitImplicitTxn(ctx, txn)
+}
+
+func (c *collection) createIndex(
+ ctx context.Context,
+ txn datastore.Txn,
+ desc client.IndexDescription,
+) (CollectionIndex, error) {
+ if desc.Name != "" && !schema.IsValidIndexName(desc.Name) {
+ return nil, schema.NewErrIndexWithInvalidName(desc.Name)
+ }
+ err := validateIndexDescription(desc)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.checkExistingFields(ctx, desc.Fields)
+ if err != nil {
+ return nil, err
+ }
+
+ indexKey, err := c.generateIndexNameIfNeededAndCreateKey(ctx, txn, &desc)
+ if err != nil {
+ return nil, err
+ }
+
+ colSeq, err := c.db.getSequence(ctx, txn, fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, c.ID()))
+ if err != nil {
+ return nil, err
+ }
+ colID, err := colSeq.next(ctx, txn)
+ if err != nil {
+ return nil, err
+ }
+ desc.ID = uint32(colID)
+
+ buf, err := json.Marshal(desc)
+ if err != nil {
+ return nil, err
+ }
+
+ err = txn.Systemstore().Put(ctx, indexKey.ToDS(), buf)
+ if err != nil {
+ return nil, err
+ }
+ colIndex, err := NewCollectionIndex(c, desc)
+ if err != nil {
+ return nil, err
+ }
+ c.desc.Indexes = append(c.desc.Indexes, colIndex.Description())
+ c.indexes = append(c.indexes, colIndex)
+ err = c.indexExistingDocs(ctx, txn, colIndex)
+ if err != nil {
+ return nil, err
+ }
+ return colIndex, nil
+}
+
+func (c *collection) iterateAllDocs(
+ ctx context.Context,
+ txn datastore.Txn,
+ fields []client.FieldDescription,
+ exec func(doc *client.Document) error,
+) error {
+ df := c.newFetcher()
+ err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, false)
+ if err != nil {
+ _ = df.Close()
+ return err
+ }
+ start := base.MakeCollectionKey(c.desc)
+ spans := core.NewSpans(core.NewSpan(start, start.PrefixEnd()))
+
+ err = df.Start(ctx, spans)
+ if err != nil {
+ _ = df.Close()
+ return err
+ }
+
+ var doc *client.Document
+ for {
+ doc, _, err = df.FetchNextDecoded(ctx)
+ if err != nil {
+ _ = df.Close()
+ return err
+ }
+ if doc == nil {
+ break
+ }
+ err = exec(doc)
+ if err != nil {
+ return err
+ }
+ }
+
+ return df.Close()
+}
+
+func (c *collection) indexExistingDocs(
+ ctx context.Context,
+ txn datastore.Txn,
+ index CollectionIndex,
+) error {
+ fields := make([]client.FieldDescription, 0, 1)
+ for _, field := range index.Description().Fields {
+ for i := range c.desc.Schema.Fields {
+ colField := c.desc.Schema.Fields[i]
+ if field.Name == colField.Name {
+ fields = append(fields, colField)
+ break
+ }
+ }
+ }
+
+ return c.iterateAllDocs(ctx, txn, fields, func(doc *client.Document) error {
+ return index.Save(ctx, txn, doc)
+ })
+}
+
+// DropIndex removes an index from the collection.
+//
+// The index will be removed from the system store.
+//
+// All index artifacts for existing documents related to the index will be removed.
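+//
+// A short usage sketch (editor's illustration; the index name is an assumption):
+//
+//  err := col.DropIndex(ctx, "User_name_ASC")
+//  // err is non-nil if no index with the given name exists.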
+func (c *collection) DropIndex(ctx context.Context, indexName string) error { + txn, err := c.getTxn(ctx, false) + if err != nil { + return err + } + defer c.discardImplicitTxn(ctx, txn) + + err = c.dropIndex(ctx, txn, indexName) + if err != nil { + return err + } + return c.commitImplicitTxn(ctx, txn) +} + +func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName string) error { + err := c.loadIndexes(ctx, txn) + if err != nil { + return err + } + + var didFind bool + for i := range c.indexes { + if c.indexes[i].Name() == indexName { + err = c.indexes[i].RemoveAll(ctx, txn) + if err != nil { + return err + } + c.indexes = append(c.indexes[:i], c.indexes[i+1:]...) + didFind = true + break + } + } + if !didFind { + return NewErrIndexWithNameDoesNotExists(indexName) + } + + for i := range c.desc.Indexes { + if c.desc.Indexes[i].Name == indexName { + c.desc.Indexes = append(c.desc.Indexes[:i], c.desc.Indexes[i+1:]...) + break + } + } + key := core.NewCollectionIndexKey(c.Name(), indexName) + err = txn.Systemstore().Delete(ctx, key.ToDS()) + if err != nil { + return err + } + + return nil +} + +func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) error { + prefix := core.NewCollectionIndexKey(c.Name(), "") + + keys, err := fetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) + if err != nil { + return err + } + + for _, key := range keys { + err = txn.Systemstore().Delete(ctx, key) + if err != nil { + return err + } + } + + return err +} + +func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error { + indexDescriptions, err := c.db.fetchCollectionIndexDescriptions(ctx, txn, c.Name()) + if err != nil { + return err + } + colIndexes := make([]CollectionIndex, 0, len(indexDescriptions)) + for _, indexDesc := range indexDescriptions { + index, err := NewCollectionIndex(c, indexDesc) + if err != nil { + return err + } + colIndexes = append(colIndexes, index) + } + c.desc.Indexes = indexDescriptions + c.indexes = colIndexes + return nil +} + +// GetIndexes returns all indexes for the collection. 
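+//
+// A short usage sketch (editor's illustration):
+//
+//  descriptions, err := col.GetIndexes(ctx)
+//  // Each entry mirrors the description returned when the index was created.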
+func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { + txn, err := c.getTxn(ctx, false) + if err != nil { + return nil, err + } + defer c.discardImplicitTxn(ctx, txn) + + err = c.loadIndexes(ctx, txn) + if err != nil { + return nil, err + } + return c.desc.Indexes, nil +} + +func (c *collection) checkExistingFields( + ctx context.Context, + fields []client.IndexedFieldDescription, +) error { + collectionFields := c.Description().Schema.Fields + for _, field := range fields { + found := false + for _, colField := range collectionFields { + if field.Name == colField.Name { + found = true + break + } + } + if !found { + return NewErrNonExistingFieldForIndex(field.Name) + } + } + return nil +} + +func (c *collection) generateIndexNameIfNeededAndCreateKey( + ctx context.Context, + txn datastore.Txn, + desc *client.IndexDescription, +) (core.CollectionIndexKey, error) { + var indexKey core.CollectionIndexKey + if desc.Name == "" { + nameIncrement := 1 + for { + desc.Name = generateIndexName(c, desc.Fields, nameIncrement) + indexKey = core.NewCollectionIndexKey(c.Name(), desc.Name) + exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) + if err != nil { + return core.CollectionIndexKey{}, err + } + if !exists { + break + } + nameIncrement++ + } + } else { + indexKey = core.NewCollectionIndexKey(c.Name(), desc.Name) + exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) + if err != nil { + return core.CollectionIndexKey{}, err + } + if exists { + return core.CollectionIndexKey{}, NewErrIndexWithNameAlreadyExists(desc.Name) + } + } + return indexKey, nil +} + +func validateIndexDescription(desc client.IndexDescription) error { + if desc.ID != 0 { + return NewErrNonZeroIndexIDProvided(desc.ID) + } + if len(desc.Fields) == 0 { + return ErrIndexMissingFields + } + if len(desc.Fields) == 1 && desc.Fields[0].Direction == client.Descending { + return ErrIndexSingleFieldWrongDirection + } + for i := range desc.Fields { + if desc.Fields[i].Name == "" { + return ErrIndexFieldMissingName + } + if desc.Fields[i].Direction == "" { + desc.Fields[i].Direction = client.Ascending + } + } + return nil +} + +func generateIndexName(col client.Collection, fields []client.IndexedFieldDescription, inc int) string { + sb := strings.Builder{} + // at the moment we support only single field indexes that can be stored only in + // ascending order. This will change once we introduce composite indexes. 
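+ // For example (editor's note, derived from the logic below): the first
+ // index on the "name" field of a "User" collection is named "User_name_ASC";
+ // a second index on the same field gets an increment suffix: "User_name_ASC_2".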
+ direction := "ASC" + sb.WriteString(col.Name()) + sb.WriteByte('_') + // we can safely assume that there is at least one field in the slice + // because we validate it before calling this function + sb.WriteString(fields[0].Name) + sb.WriteByte('_') + sb.WriteString(direction) + if inc > 1 { + sb.WriteByte('_') + sb.WriteString(strconv.Itoa(inc)) + } + return sb.String() +} + +type deserializedElement[T any] struct { + key string + element T +} + +func deserializePrefix[T any]( + ctx context.Context, + prefix string, + storage ds.Read, +) ([]deserializedElement[T], error) { + q, err := storage.Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return nil, NewErrFailedToCreateCollectionQuery(err) + } + + elements := make([]deserializedElement[T], 0) + for res := range q.Next() { + if res.Error != nil { + _ = q.Close() + return nil, res.Error + } + + var element T + err = json.Unmarshal(res.Value, &element) + if err != nil { + _ = q.Close() + return nil, NewErrInvalidStoredIndex(err) + } + elements = append(elements, deserializedElement[T]{key: res.Key, element: element}) + } + if err := q.Close(); err != nil { + return nil, err + } + return elements, nil +} diff --git a/db/collection_update.go b/db/collection_update.go index ea2d0b3980..b945ec4592 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -305,9 +305,19 @@ func (c *collection) applyMerge( return ErrInvalidMergeValueType } - fd, valid := c.desc.GetField(mfield) - if !valid { - return client.NewErrFieldNotExist(mfield) + fd, isValidAliasField := c.desc.Schema.GetField(mfield + request.RelatedObjectID) + if isValidAliasField { + // Overwrite the key with aliased name to the internal related object name. + oldKey := mfield + mfield = mfield + request.RelatedObjectID + mergeMap[mfield] = mval + delete(mergeMap, oldKey) + } else { + var isValidField bool + fd, isValidField = c.desc.Schema.GetField(mfield) + if !isValidField { + return client.NewErrFieldNotExist(mfield) + } } relationFieldDescription, isSecondaryRelationID := c.isSecondaryIDField(fd) @@ -398,7 +408,9 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (clie return client.FieldDescription{}, false } - relationFieldDescription, valid := c.Description().GetField(strings.TrimSuffix(fieldDesc.Name, "_id")) + relationFieldDescription, valid := c.Description().Schema.GetField( + strings.TrimSuffix(fieldDesc.Name, request.RelatedObjectID), + ) return relationFieldDescription, valid && !relationFieldDescription.IsPrimaryRelation() } @@ -431,7 +443,7 @@ func (c *collection) patchPrimaryDoc( _, err = primaryCol.UpdateWithKey( ctx, primaryDockey, - fmt.Sprintf(`{"%s": "%s"}`, primaryField.Name+"_id", docKey), + fmt.Sprintf(`{"%s": "%s"}`, primaryField.Name+request.RelatedObjectID, docKey), ) if err != nil { return err @@ -491,7 +503,7 @@ func validateFieldSchema(val *fastjson.Value, field client.FieldDescription) (an return getNillableArray(val, getInt64) case client.FieldKind_FOREIGN_OBJECT, client.FieldKind_FOREIGN_OBJECT_ARRAY: - return nil, ErrMergeSubTypeNotSupported + return nil, NewErrFieldOrAliasToFieldNotExist(field.Name) } return nil, client.NewErrUnhandledType("FieldKind", field.Kind) diff --git a/db/db.go b/db/db.go index 656dbdadf7..8ffda296b4 100644 --- a/db/db.go +++ b/db/db.go @@ -28,6 +28,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/lens" "github.com/sourcenetwork/defradb/logging" 
"github.com/sourcenetwork/defradb/merkle/crdt" "github.com/sourcenetwork/defradb/request/graphql" @@ -58,11 +59,15 @@ type db struct { events events.Events - parser core.Parser + parser core.Parser + lensRegistry client.LensRegistry // The maximum number of retries per transaction. maxTxnRetries immutable.Option[int] + // The maximum number of cached migrations instances to preserve per schema version. + lensPoolSize immutable.Option[int] + // The options used to init the database options any } @@ -88,6 +93,15 @@ func WithMaxRetries(num int) Option { } } +// WithLensPoolSize sets the maximum number of cached migrations instances to preserve per schema version. +// +// Will default to `5` if not set. +func WithLensPoolSize(num int) Option { + return func(db *db) { + db.lensPoolSize = immutable.Some(num) + } +} + // NewDB creates a new instance of the DB using the given options. func NewDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (client.DB, error) { return newDB(ctx, rootstore, options...) @@ -122,6 +136,10 @@ func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option opt(db) } + // lensPoolSize may be set by `options`, and because they are funcs on db + // we have to mutate `db` here to set the registry. + db.lensRegistry = lens.NewRegistry(db.lensPoolSize) + err = db.initialize(ctx) if err != nil { return nil, err @@ -162,6 +180,10 @@ func (db *db) systemstore() datastore.DSReaderWriter { return db.multistore.Systemstore() } +func (db *db) LensRegistry() client.LensRegistry { + return db.lensRegistry +} + // Initialize is called when a database is first run and creates all the db global meta data // like Collection ID counters. func (db *db) initialize(ctx context.Context) error { @@ -180,13 +202,19 @@ func (db *db) initialize(ctx context.Context) error { return err } // if we're loading an existing database, just load the schema - // and finish initialization + // and migrations and finish initialization if exists { log.Debug(ctx, "DB has already been initialized, continuing") err = db.loadSchema(ctx, txn) if err != nil { return err } + + err = db.lensRegistry.ReloadLenses(ctx, txn) + if err != nil { + return err + } + // The query language types are only updated on successful commit // so we must not forget to do so on success regardless of whether // we have written to the datastores. diff --git a/db/errors.go b/db/errors.go index 7aa1cc5656..e5b55dcf1a 100644 --- a/db/errors.go +++ b/db/errors.go @@ -16,31 +16,69 @@ import ( ) const ( - errFailedToGetHeads string = "failed to get document heads" - errFailedToCreateCollectionQuery string = "failed to create collection prefix query" - errFailedToGetCollection string = "failed to get collection" - errDocVerification string = "the document verification failed" - errAddingP2PCollection string = "cannot add collection ID" - errRemovingP2PCollection string = "cannot remove collection ID" - errAddCollectionWithPatch string = "unknown collection, adding collections via patch is not supported" - errCollectionIDDoesntMatch string = "CollectionID does not match existing" - errSchemaIDDoesntMatch string = "SchemaID does not match existing" - errCannotModifySchemaName string = "modifying the schema name is not supported" - errCannotSetVersionID string = "setting the VersionID is not supported. 
It is updated automatically" - errCannotSetFieldID string = "explicitly setting a field ID value is not supported" - errCannotAddRelationalField string = "the adding of new relation fields is not yet supported" - errDuplicateField string = "duplicate field" - errCannotMutateField string = "mutating an existing field is not supported" - errCannotMoveField string = "moving fields is not currently supported" - errInvalidCRDTType string = "only default or LWW (last writer wins) CRDT types are supported" - errCannotDeleteField string = "deleting an existing field is not supported" - errFieldKindNotFound string = "no type found for given name" + errFailedToGetHeads string = "failed to get document heads" + errFailedToCreateCollectionQuery string = "failed to create collection prefix query" + errFailedToGetCollection string = "failed to get collection" + errFailedToGetAllCollections string = "failed to get all collections" + errDocVerification string = "the document verification failed" + errAddingP2PCollection string = "cannot add collection ID" + errRemovingP2PCollection string = "cannot remove collection ID" + errAddCollectionWithPatch string = "unknown collection, adding collections via patch is not supported" + errCollectionIDDoesntMatch string = "CollectionID does not match existing" + errSchemaIDDoesntMatch string = "SchemaID does not match existing" + errCannotModifySchemaName string = "modifying the schema name is not supported" + errCannotSetVersionID string = "setting the VersionID is not supported. It is updated automatically" + errCannotSetFieldID string = "explicitly setting a field ID value is not supported" + errCannotAddRelationalField string = "the adding of new relation fields is not yet supported" + errDuplicateField string = "duplicate field" + errCannotMutateField string = "mutating an existing field is not supported" + errCannotMoveField string = "moving fields is not currently supported" + errInvalidCRDTType string = "only default or LWW (last writer wins) CRDT types are supported" + errCannotDeleteField string = "deleting an existing field is not supported" + errFieldKindNotFound string = "no type found for given name" + errDocumentAlreadyExists string = "a document with the given dockey already exists" + errDocumentDeleted string = "a document with the given dockey has been deleted" + errIndexMissingFields string = "index missing fields" + errNonZeroIndexIDProvided string = "non-zero index ID provided" + errIndexFieldMissingName string = "index field missing name" + errIndexFieldMissingDirection string = "index field missing direction" + errIndexSingleFieldWrongDirection string = "wrong direction for index with a single field" + errIndexWithNameAlreadyExists string = "index with name already exists" + errInvalidStoredIndex string = "invalid stored index" + errInvalidStoredIndexKey string = "invalid stored index key" + errNonExistingFieldForIndex string = "creating an index on a non-existing property" + errCollectionDoesntExisting string = "collection with given name doesn't exist" + errFailedToStoreIndexedField string = "failed to store indexed field" + errFailedToReadStoredIndexDesc string = "failed to read stored index description" + errCanNotDeleteIndexedField string = "can not delete indexed field" + errCanNotAddIndexWithPatch string = "adding indexes via patch is not supported" + errCanNotDropIndexWithPatch string = "dropping indexes via patch is not supported" + errCanNotChangeIndexWithPatch string = "changing indexes via patch is not supported" + 
errIndexWithNameDoesNotExists string = "index with name doesn't exist"
+ errInvalidFieldValue string = "invalid field value"
+ errUnsupportedIndexFieldType string = "unsupported index field type"
+ errIndexDescriptionHasNoFields string = "index description has no fields"
+ errIndexDescHasNonExistingField string = "index description has non-existing field"
+ errFieldOrAliasToFieldNotExist string = "the given field or alias to field does not exist"
+ errCreateFile string = "failed to create file"
+ errOpenFile string = "failed to open file"
+ errCloseFile string = "failed to close file"
+ errRemoveFile string = "failed to remove file"
+ errFailedToReadByte string = "failed to read byte"
+ errFailedToWriteString string = "failed to write string"
+ errJSONDecode string = "failed to decode JSON"
+ errDocFromMap string = "failed to create a new doc from map"
+ errDocCreate string = "failed to save a new doc to collection"
+ errDocUpdate string = "failed to update doc to collection"
+ errExpectedJSONObject string = "expected JSON object"
+ errExpectedJSONArray string = "expected JSON array"
)

var (
ErrFailedToGetHeads = errors.New(errFailedToGetHeads)
ErrFailedToCreateCollectionQuery = errors.New(errFailedToCreateCollectionQuery)
ErrFailedToGetCollection = errors.New(errFailedToGetCollection)
+ ErrFailedToGetAllCollections = errors.New(errFailedToGetAllCollections)
// ErrDocVerification occurs when a documents contents fail the verification during a Create()
// call against the supplied Document Key.
ErrDocVerification = errors.New(errDocVerification)
@@ -52,38 +90,60 @@ var (
ErrInvalidMergeValueType = errors.New(
"the type of value in the merge patch doesn't match the schema",
)
- ErrMissingDocFieldToUpdate = errors.New("missing document field to update")
- ErrDocMissingKey = errors.New("document is missing key")
- ErrMergeSubTypeNotSupported = errors.New("merge doesn't support sub types yet")
- ErrInvalidFilter = errors.New("invalid filter")
- ErrInvalidOpPath = errors.New("invalid patch op path")
- ErrDocumentAlreadyExists = errors.New("a document with the given dockey already exists")
- ErrDocumentDeleted = errors.New("a document with the given dockey has been deleted")
- ErrUnknownCRDTArgument = errors.New("invalid CRDT arguments")
- ErrUnknownCRDT = errors.New("unknown crdt")
- ErrSchemaFirstFieldDocKey = errors.New("collection schema first field must be a DocKey")
- ErrCollectionAlreadyExists = errors.New("collection already exists")
- ErrCollectionNameEmpty = errors.New("collection name can't be empty")
- ErrSchemaIdEmpty = errors.New("schema ID can't be empty")
- ErrSchemaVersionIdEmpty = errors.New("schema version ID can't be empty")
- ErrKeyEmpty = errors.New("key cannot be empty")
- ErrAddingP2PCollection = errors.New(errAddingP2PCollection)
- ErrRemovingP2PCollection = errors.New(errRemovingP2PCollection)
- ErrAddCollectionWithPatch = errors.New(errAddCollectionWithPatch)
- ErrCollectionIDDoesntMatch = errors.New(errCollectionIDDoesntMatch)
- ErrSchemaIDDoesntMatch = errors.New(errSchemaIDDoesntMatch)
- ErrCannotModifySchemaName = errors.New(errCannotModifySchemaName)
- ErrCannotSetVersionID = errors.New(errCannotSetVersionID)
- ErrCannotSetFieldID = errors.New(errCannotSetFieldID)
- ErrCannotAddRelationalField = errors.New(errCannotAddRelationalField)
- ErrDuplicateField = errors.New(errDuplicateField)
- ErrCannotMutateField = errors.New(errCannotMutateField)
- ErrCannotMoveField = errors.New(errCannotMoveField)
- ErrInvalidCRDTType = errors.New(errInvalidCRDTType)
- 
ErrCannotDeleteField = errors.New(errCannotDeleteField) - ErrFieldKindNotFound = errors.New(errFieldKindNotFound) + ErrMissingDocFieldToUpdate = errors.New("missing document field to update") + ErrDocMissingKey = errors.New("document is missing key") + ErrInvalidFilter = errors.New("invalid filter") + ErrInvalidOpPath = errors.New("invalid patch op path") + ErrDocumentAlreadyExists = errors.New(errDocumentAlreadyExists) + ErrDocumentDeleted = errors.New(errDocumentDeleted) + ErrUnknownCRDTArgument = errors.New("invalid CRDT arguments") + ErrUnknownCRDT = errors.New("unknown crdt") + ErrSchemaFirstFieldDocKey = errors.New("collection schema first field must be a DocKey") + ErrCollectionAlreadyExists = errors.New("collection already exists") + ErrCollectionNameEmpty = errors.New("collection name can't be empty") + ErrSchemaIDEmpty = errors.New("schema ID can't be empty") + ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty") + ErrKeyEmpty = errors.New("key cannot be empty") + ErrAddingP2PCollection = errors.New(errAddingP2PCollection) + ErrRemovingP2PCollection = errors.New(errRemovingP2PCollection) + ErrAddCollectionWithPatch = errors.New(errAddCollectionWithPatch) + ErrCollectionIDDoesntMatch = errors.New(errCollectionIDDoesntMatch) + ErrSchemaIDDoesntMatch = errors.New(errSchemaIDDoesntMatch) + ErrCannotModifySchemaName = errors.New(errCannotModifySchemaName) + ErrCannotSetVersionID = errors.New(errCannotSetVersionID) + ErrCannotSetFieldID = errors.New(errCannotSetFieldID) + ErrCannotAddRelationalField = errors.New(errCannotAddRelationalField) + ErrDuplicateField = errors.New(errDuplicateField) + ErrCannotMutateField = errors.New(errCannotMutateField) + ErrCannotMoveField = errors.New(errCannotMoveField) + ErrInvalidCRDTType = errors.New(errInvalidCRDTType) + ErrCannotDeleteField = errors.New(errCannotDeleteField) + ErrFieldKindNotFound = errors.New(errFieldKindNotFound) + ErrIndexMissingFields = errors.New(errIndexMissingFields) + ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName) + ErrIndexFieldMissingDirection = errors.New(errIndexFieldMissingDirection) + ErrIndexSingleFieldWrongDirection = errors.New(errIndexSingleFieldWrongDirection) + ErrCanNotChangeIndexWithPatch = errors.New(errCanNotChangeIndexWithPatch) + ErrFieldOrAliasToFieldNotExist = errors.New(errFieldOrAliasToFieldNotExist) + ErrCreateFile = errors.New(errCreateFile) + ErrOpenFile = errors.New(errOpenFile) + ErrCloseFile = errors.New(errCloseFile) + ErrRemoveFile = errors.New(errRemoveFile) + ErrFailedToReadByte = errors.New(errFailedToReadByte) + ErrFailedToWriteString = errors.New(errFailedToWriteString) + ErrJSONDecode = errors.New(errJSONDecode) + ErrDocFromMap = errors.New(errDocFromMap) + ErrDocCreate = errors.New(errDocCreate) + ErrDocUpdate = errors.New(errDocUpdate) + ErrExpectedJSONObject = errors.New(errExpectedJSONObject) + ErrExpectedJSONArray = errors.New(errExpectedJSONArray) ) +// NewErrFieldOrAliasToFieldNotExist returns an error indicating that the given field or an alias field does not exist. +func NewErrFieldOrAliasToFieldNotExist(name string) error { + return errors.New(errFieldOrAliasToFieldNotExist, errors.NewKV("Name", name)) +} + // NewErrFailedToGetHeads returns a new error indicating that the heads of a document // could not be obtained. 
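// (Editor's note: the constructors in this file follow a single pattern:
// wrap a message constant with errors.New or errors.Wrap and attach context
// as errors.NewKV key-value pairs.)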
func NewErrFailedToGetHeads(inner error) error { @@ -96,11 +156,64 @@ func NewErrFailedToCreateCollectionQuery(inner error) error { return errors.Wrap(errFailedToCreateCollectionQuery, inner) } -// NewErrFailedToGetCollection returns a new error indicating that the collection could not be obtained. +// NewErrInvalidStoredIndex returns a new error indicating that the stored +// index in the database is invalid. +func NewErrInvalidStoredIndex(inner error) error { + return errors.Wrap(errInvalidStoredIndex, inner) +} + +// NewErrInvalidStoredIndexKey returns a new error indicating that the key of a stored +// index in the database is invalid. +func NewErrInvalidStoredIndexKey(key string) error { + return errors.New(errInvalidStoredIndexKey, errors.NewKV("Key", key)) +} + +// NewErrNonExistingFieldForIndex returns a new error indicating the attempt to create an index +// on a non-existing field. +func NewErrNonExistingFieldForIndex(field string) error { + return errors.New(errNonExistingFieldForIndex, errors.NewKV("Field", field)) +} + +// NewErrCanNotReadCollection returns a new error indicating the collection doesn't exist. +func NewErrCanNotReadCollection(colName string, inner error) error { + return errors.Wrap(errCollectionDoesntExisting, inner, errors.NewKV("Collection", colName)) +} + +// NewErrFailedToStoreIndexedField returns a new error indicating that the indexed field +// could not be stored. +func NewErrFailedToStoreIndexedField(key string, inner error) error { + return errors.Wrap(errFailedToStoreIndexedField, inner, errors.NewKV("Key", key)) +} + +// NewErrFailedToReadStoredIndexDesc returns a new error indicating that the stored index +// description could not be read. +func NewErrFailedToReadStoredIndexDesc(inner error) error { + return errors.Wrap(errFailedToReadStoredIndexDesc, inner) +} + +// NewCanNotDeleteIndexedField returns a new error indicating a failed attempt to delete an indexed field. +func NewCanNotDeleteIndexedField(inner error) error { + return errors.Wrap(errCanNotDeleteIndexedField, inner) +} + +// NewErrNonZeroIndexIDProvided returns a new error indicating that a non-zero index ID was +// provided. +func NewErrNonZeroIndexIDProvided(indexID uint32) error { + return errors.New(errNonZeroIndexIDProvided, errors.NewKV("ID", indexID)) +} + +// NewErrFailedToGetCollection returns a new error indicating that the collection could not +// be obtained. func NewErrFailedToGetCollection(name string, inner error) error { return errors.Wrap(errFailedToGetCollection, inner, errors.NewKV("Name", name)) } +// NewErrFailedToGetAllCollections returns a new error indicating that the collection list could not +// be obtained. +func NewErrFailedToGetAllCollections(inner error) error { + return errors.Wrap(errFailedToGetAllCollections, inner) +} + // NewErrDocVerification returns a new error indicating that the document verification failed. func NewErrDocVerification(expected string, actual string) error { return errors.New( @@ -214,3 +327,162 @@ func NewErrCannotDeleteField(name string, id client.FieldID) error { errors.NewKV("ID", id), ) } + +func NewErrDocumentAlreadyExists(dockey string) error { + return errors.New( + errDocumentAlreadyExists, + errors.NewKV("DocKey", dockey), + ) +} + +func NewErrDocumentDeleted(dockey string) error { + return errors.New( + errDocumentDeleted, + errors.NewKV("DocKey", dockey), + ) +} + +// NewErrIndexWithNameAlreadyExists returns a new error indicating that an index with the +// given name already exists.
+func NewErrIndexWithNameAlreadyExists(indexName string) error { + return errors.New( + errIndexWithNameAlreadyExists, + errors.NewKV("Name", indexName), + ) +} + +// NewErrIndexWithNameDoesNotExists returns a new error indicating that an index with the +// given name does not exist. +func NewErrIndexWithNameDoesNotExists(indexName string) error { + return errors.New( + errIndexWithNameDoesNotExists, + errors.NewKV("Name", indexName), + ) +} + +// NewErrCannotAddIndexWithPatch returns a new error indicating that an index cannot be added +// with a patch. +func NewErrCannotAddIndexWithPatch(proposedName string) error { + return errors.New( + errCanNotAddIndexWithPatch, + errors.NewKV("ProposedName", proposedName), + ) +} + +// NewErrCannotDropIndexWithPatch returns a new error indicating that an index cannot be dropped +// with a patch. +func NewErrCannotDropIndexWithPatch(indexName string) error { + return errors.New( + errCanNotDropIndexWithPatch, + errors.NewKV("Name", indexName), + ) +} + +// NewErrInvalidFieldValue returns a new error indicating that the given value is invalid for the +// given field kind. +func NewErrInvalidFieldValue(kind client.FieldKind, value any) error { + return errors.New( + errInvalidFieldValue, + errors.NewKV("Kind", kind), + errors.NewKV("Value", value), + ) +} + +// NewErrUnsupportedIndexFieldType returns a new error indicating that the given field kind is not +// supported for indexing. +func NewErrUnsupportedIndexFieldType(kind client.FieldKind) error { + return errors.New( + errUnsupportedIndexFieldType, + errors.NewKV("Kind", kind), + ) +} + +// NewErrIndexDescHasNoFields returns a new error indicating that the given index +// description has no fields. +func NewErrIndexDescHasNoFields(desc client.IndexDescription) error { + return errors.New( + errIndexDescriptionHasNoFields, + errors.NewKV("Description", desc), + ) +} + +// NewErrIndexDescHasNonExistingField returns a new error indicating that the given index +// description points to a field that does not exist. +func NewErrIndexDescHasNonExistingField(desc client.IndexDescription, fieldName string) error { + return errors.New( + errIndexDescHasNonExistingField, + errors.NewKV("Description", desc), + errors.NewKV("Field name", fieldName), + ) +} + +// NewErrCreateFile returns a new error indicating there was a failure in creating a file. +func NewErrCreateFile(inner error, filepath string) error { + return errors.Wrap(errCreateFile, inner, errors.NewKV("Filepath", filepath)) +} + +// NewErrOpenFile returns a new error indicating there was a failure in opening a file. +func NewErrOpenFile(inner error, filepath string) error { + return errors.Wrap(errOpenFile, inner, errors.NewKV("Filepath", filepath)) +} + +// NewErrCloseFile returns a new error indicating there was a failure in closing a file. +func NewErrCloseFile(closeErr, other error) error { + if other != nil { + return errors.Wrap(errCloseFile, closeErr, errors.NewKV("Other error", other)) + } + return errors.Wrap(errCloseFile, closeErr) +} + +// NewErrRemoveFile returns a new error indicating there was a failure in removing a file. 
+func NewErrRemoveFile(removeErr, other error, filepath string) error { + if other != nil { + return errors.Wrap( + errRemoveFile, + removeErr, + errors.NewKV("Other error", other), + errors.NewKV("Filepath", filepath), + ) + } + return errors.Wrap( + errRemoveFile, + removeErr, + errors.NewKV("Filepath", filepath), + ) +} + +// NewErrFailedToReadByte returns a new error indicating there was a failure in reading a byte +// from the Reader +func NewErrFailedToReadByte(inner error) error { + return errors.Wrap(errFailedToReadByte, inner) +} + +// NewErrFailedToWriteString returns a new error indicating there was a failure in writing +// a string to the Writer +func NewErrFailedToWriteString(inner error) error { + return errors.Wrap(errFailedToWriteString, inner) +} + +// NewErrJSONDecode returns a new error indicating there was a failure in decoding some JSON +// from the JSON decoder +func NewErrJSONDecode(inner error) error { + return errors.Wrap(errJSONDecode, inner) +} + +// NewErrDocFromMap returns a new error indicating there was a failure to create +// a new doc from a map +func NewErrDocFromMap(inner error) error { + return errors.Wrap(errDocFromMap, inner) +} + +// NewErrDocCreate returns a new error indicating there was a failure to save +// a new doc to a collection +func NewErrDocCreate(inner error) error { + return errors.Wrap(errDocCreate, inner) +} + +// NewErrDocUpdate returns a new error indicating there was a failure to update +// a doc to a collection +func NewErrDocUpdate(inner error) error { + return errors.Wrap(errDocUpdate, inner) +} diff --git a/db/fetcher/encoded_doc.go b/db/fetcher/encoded_doc.go index a141c50652..ec3803a2fa 100644 --- a/db/fetcher/encoded_doc.go +++ b/db/fetcher/encoded_doc.go @@ -11,15 +11,26 @@ package fetcher import ( - "fmt" - + "github.com/bits-and-blooms/bitset" "github.com/fxamacker/cbor/v2" - "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" ) +type EncodedDocument interface { + // Key returns the key of the document + Key() []byte + SchemaVersionID() string + // Reset re-initializes the EncodedDocument object. + Reset() + // Decode returns a properly decoded document object + Decode() (*client.Document, error) + // DecodeToDoc returns a decoded document as a + // map of field/value pairs + DecodeToDoc() (core.Doc, error) +} + type EPTuple []encProperty // EncProperty is an encoded property of an EncodedDocument @@ -27,199 +38,122 @@ type encProperty struct { Desc client.FieldDescription Raw []byte + // Filter flag to determine if this field + // is needed for eager filter evaluation + IsFilter bool + // // encoding meta data // encoding base.DataEncoding }
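The reworked Decode below leans on core.DecodeFieldValue to normalize what CBOR hands back; a minimal, runnable sketch (not part of the diff) of the numeric looseness that makes such a normalization step necessary:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// An int64 written through CBOR comes back as uint64 when decoded into
	// an empty interface, so field kinds must be normalized after decoding.
	raw, err := cbor.Marshal(int64(42))
	if err != nil {
		panic(err)
	}
	var val any
	if err := cbor.Unmarshal(raw, &val); err != nil {
		panic(err)
	}
	fmt.Printf("%T(%v)\n", val, val) // uint64(42)
}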
// Decode returns the decoded value for the given property. -func (e encProperty) Decode() (client.CType, any, error) { - ctype := client.CType(e.Raw[0]) - buf := e.Raw[1:] +func (e encProperty) Decode() (any, error) { var val any - err := cbor.Unmarshal(buf, &val) + err := cbor.Unmarshal(e.Raw, &val) if err != nil { - return ctype, nil, err - } - - if array, isArray := val.([]any); isArray { - var ok bool - switch e.Desc.Kind { - case client.FieldKind_BOOL_ARRAY: - boolArray := make([]bool, len(array)) - for i, untypedValue := range array { - boolArray[i], ok = untypedValue.(bool) - if !ok { - return ctype, nil, client.NewErrUnexpectedType[bool](e.Desc.Name, untypedValue) - } - } - val = boolArray - - case client.FieldKind_NILLABLE_BOOL_ARRAY: - val, err = convertNillableArray[bool](e.Desc.Name, array) - if err != nil { - return ctype, nil, err - } - - case client.FieldKind_INT_ARRAY: - intArray := make([]int64, len(array)) - for i, untypedValue := range array { - intArray[i], err = convertToInt(fmt.Sprintf("%s[%v]", e.Desc.Name, i), untypedValue) - if err != nil { - return ctype, nil, err - } - } - val = intArray - - case client.FieldKind_NILLABLE_INT_ARRAY: - val, err = convertNillableArrayWithConverter(e.Desc.Name, array, convertToInt) - if err != nil { - return ctype, nil, err - } - - case client.FieldKind_FLOAT_ARRAY: - floatArray := make([]float64, len(array)) - for i, untypedValue := range array { - floatArray[i], ok = untypedValue.(float64) - if !ok { - return ctype, nil, client.NewErrUnexpectedType[float64](e.Desc.Name, untypedValue) - } - } - val = floatArray - - case client.FieldKind_NILLABLE_FLOAT_ARRAY: - val, err = convertNillableArray[float64](e.Desc.Name, array) - if err != nil { - return ctype, nil, err - } - - case client.FieldKind_STRING_ARRAY: - stringArray := make([]string, len(array)) - for i, untypedValue := range array { - stringArray[i], ok = untypedValue.(string) - if !ok { - return ctype, nil, client.NewErrUnexpectedType[string](e.Desc.Name, untypedValue) - } - } - val = stringArray - - case client.FieldKind_NILLABLE_STRING_ARRAY: - val, err = convertNillableArray[string](e.Desc.Name, array) - if err != nil { - return ctype, nil, err - } - } - } else { // CBOR often encodes values typed as floats as ints - switch e.Desc.Kind { - case client.FieldKind_FLOAT: - switch v := val.(type) { - case int64: - return ctype, float64(v), nil - case int: - return ctype, float64(v), nil - case uint64: - return ctype, float64(v), nil - case uint: - return ctype, float64(v), nil - } - } + return nil, err } - return ctype, val, nil + return core.DecodeFieldValue(e.Desc, val) } -func convertNillableArray[T any](propertyName string, items []any) ([]immutable.Option[T], error) { - resultArray := make([]immutable.Option[T], len(items)) - for i, untypedValue := range items { - if untypedValue == nil { - resultArray[i] = immutable.None[T]() - continue - } - value, ok := untypedValue.(T) - if !ok { - return nil, client.NewErrUnexpectedType[T](fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) - } - resultArray[i] = immutable.Some(value) - } - return resultArray, nil +// @todo: Implement Encoded Document type +type encodedDocument struct { + mapping *core.DocumentMapping + doc *core.Doc + + key []byte + schemaVersionID string + Properties map[client.FieldDescription]*encProperty + + // tracking bitsets + // A value of 1 indicates a required field + // 0 means we ignore the field + // we update the bitsets as we collect values + // by setting the bit for the FieldID + filterSet *bitset.BitSet // filter fields + selectSet *bitset.BitSet // select fields }
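The two tracking bitsets on encodedDocument drive the eager-filter shortcut implemented later in fetcher.go; a small runnable sketch (not from the diff) of the same compare-two-bitsets trick, using the bits-and-blooms/bitset package imported in this file:

package main

import (
	"fmt"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	const numFields = 8

	// Static set: field IDs 1 and 3 are required by the filter.
	required := bitset.New(numFields)
	required.Set(1).Set(3)

	// Dynamic set: bits are set as the fetcher collects each field's value.
	collected := bitset.New(numFields)
	collected.Set(1)
	fmt.Println(required.Equal(collected)) // false: field 3 not seen yet

	collected.Set(3)
	fmt.Println(required.Equal(collected)) // true: safe to run the filter now
}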
-func convertNillableArrayWithConverter[TOut any]( - propertyName string, - items []any, - converter func(propertyName string, in any) (TOut, error), -) ([]immutable.Option[TOut], error) { - resultArray := make([]immutable.Option[TOut], len(items)) - for i, untypedValue := range items { - if untypedValue == nil { - resultArray[i] = immutable.None[TOut]() - continue - } - value, err := converter(fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) - if err != nil { - return nil, err - } - resultArray[i] = immutable.Some(value) - } - return resultArray, nil -} +var _ EncodedDocument = (*encodedDocument)(nil) -func convertToInt(propertyName string, untypedValue any) (int64, error) { - switch value := untypedValue.(type) { - case uint64: - return int64(value), nil - case int64: - return value, nil - case float64: - return int64(value), nil - default: - return 0, client.NewErrUnexpectedType[string](propertyName, untypedValue) - } +func (encdoc *encodedDocument) Key() []byte { + return encdoc.key } -// @todo: Implement Encoded Document type -type encodedDocument struct { - Key []byte - Properties map[client.FieldDescription]*encProperty +func (encdoc *encodedDocument) SchemaVersionID() string { + return encdoc.schemaVersionID } // Reset re-initializes the EncodedDocument object. func (encdoc *encodedDocument) Reset() { - encdoc.Properties = make(map[client.FieldDescription]*encProperty) - encdoc.Key = nil + encdoc.Properties = make(map[client.FieldDescription]*encProperty, 0) + encdoc.key = nil + if encdoc.mapping != nil { + doc := encdoc.mapping.NewDoc() + encdoc.doc = &doc + } + encdoc.filterSet = nil + encdoc.selectSet = nil + encdoc.schemaVersionID = "" } // Decode returns a properly decoded document object func (encdoc *encodedDocument) Decode() (*client.Document, error) { - key, err := client.NewDocKeyFromString(string(encdoc.Key)) + key, err := client.NewDocKeyFromString(string(encdoc.key)) if err != nil { return nil, err } doc := client.NewDocWithKey(key) - for fieldDesc, prop := range encdoc.Properties { - ctype, val, err := prop.Decode() + for _, prop := range encdoc.Properties { + val, err := prop.Decode() if err != nil { return nil, err } - err = doc.SetAs(fieldDesc.Name, val, ctype) + err = doc.SetAs(prop.Desc.Name, val, prop.Desc.Typ) if err != nil { return nil, err } } + doc.SchemaVersionID = encdoc.SchemaVersionID() + return doc, nil } // DecodeToDoc returns a decoded document as a // map of field/value pairs -func (encdoc *encodedDocument) DecodeToDoc(mapping *core.DocumentMapping) (core.Doc, error) { - doc := mapping.NewDoc() - doc.SetKey(string(encdoc.Key)) - for fieldDesc, prop := range encdoc.Properties { - _, val, err := prop.Decode() +func (encdoc *encodedDocument) DecodeToDoc() (core.Doc, error) { + return encdoc.decodeToDoc(false) +} + +func (encdoc *encodedDocument) decodeToDocForFilter() (core.Doc, error) { + return encdoc.decodeToDoc(true) +} + +func (encdoc *encodedDocument) decodeToDoc(filter bool) (core.Doc, error) { + if encdoc.mapping == nil { + return core.Doc{}, ErrMissingMapper + } + if encdoc.doc == nil { + doc := encdoc.mapping.NewDoc() + encdoc.doc = &doc + } + encdoc.doc.SetKey(string(encdoc.key)) + for _, prop := range encdoc.Properties { + if encdoc.doc.Fields[prop.Desc.ID] != nil { // use cached decoded fields + continue + } + if filter && !prop.IsFilter { // only get filter fields if filter=true + continue + } + val, err := prop.Decode() if err != nil { return core.Doc{}, err } - doc.Fields[fieldDesc.ID] = val + 
encdoc.doc.Fields[prop.Desc.ID] = val } - return doc, nil + + encdoc.doc.SchemaVersionID = encdoc.SchemaVersionID() + return *encdoc.doc, nil } diff --git a/db/fetcher/errors.go b/db/fetcher/errors.go index 31453e8ad6..84d947c46f 100644 --- a/db/fetcher/errors.go +++ b/db/fetcher/errors.go @@ -25,6 +25,7 @@ const ( errVFetcherFailedToDecodeNode string = "(version fetcher) failed to decode protobuf" errVFetcherFailedToGetDagLink string = "(version fetcher) failed to get node link from DAG" errFailedToGetDagNode string = "failed to get DAG Node" + errMissingMapper string = "missing document mapper" ) var ( @@ -38,6 +39,7 @@ var ( ErrVFetcherFailedToDecodeNode = errors.New(errVFetcherFailedToDecodeNode) ErrVFetcherFailedToGetDagLink = errors.New(errVFetcherFailedToGetDagLink) ErrFailedToGetDagNode = errors.New(errFailedToGetDagNode) + ErrMissingMapper = errors.New(errMissingMapper) ErrSingleSpanOnly = errors.New("spans must contain only a single entry") ) diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go index 5de2a8899e..35a89c29c0 100644 --- a/db/fetcher/fetcher.go +++ b/db/fetcher/fetcher.go @@ -13,7 +13,9 @@ package fetcher import ( "bytes" "context" + "strings" + "github.com/bits-and-blooms/bitset" dsq "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/defradb/client" @@ -21,19 +23,56 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/iterable" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/request/graphql/parser" ) +// ExecInfo contains statistics about the fetcher execution. +type ExecInfo struct { + // Number of documents fetched. + DocsFetched uint64 + // Number of fields fetched. + FieldsFetched uint64 +} + +// Add adds the other ExecInfo to the current ExecInfo. +func (s *ExecInfo) Add(other ExecInfo) { + s.DocsFetched += other.DocsFetched + s.FieldsFetched += other.FieldsFetched +} + +// Reset resets the ExecInfo. +func (s *ExecInfo) Reset() { + s.DocsFetched = 0 + s.FieldsFetched = 0 +} + // Fetcher is the interface for collecting documents from the underlying data store. // It handles all the key/value scanning, aggregation, and document encoding. type Fetcher interface { - Init(col *client.CollectionDescription, fields []*client.FieldDescription, reverse bool, showDeleted bool) error - Start(ctx context.Context, txn datastore.Txn, spans core.Spans) error - FetchNext(ctx context.Context) (*encodedDocument, error) - FetchNextDecoded(ctx context.Context) (*client.Document, error) - FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, error) + Init( + ctx context.Context, + txn datastore.Txn, + col *client.CollectionDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docmapper *core.DocumentMapping, + reverse bool, + showDeleted bool, + ) error + Start(ctx context.Context, spans core.Spans) error + FetchNext(ctx context.Context) (EncodedDocument, ExecInfo, error) + FetchNextDecoded(ctx context.Context) (*client.Document, ExecInfo, error) + FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, ExecInfo, error) Close() error }
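As a usage sketch (not part of the diff): draining the reworked Fetcher interface while summing the new ExecInfo statistics. It assumes Init and Start have already succeeded, and relies on FetchNext returning a nil document on exhaustion, as DocumentFetcher below does:

package example

import (
	"context"

	"github.com/sourcenetwork/defradb/db/fetcher"
)

// scanAll drains an already-initialized and started Fetcher, aggregating the
// per-document ExecInfo stats along the way.
func scanAll(ctx context.Context, f fetcher.Fetcher) (fetcher.ExecInfo, error) {
	var total fetcher.ExecInfo
	for {
		encdoc, info, err := f.FetchNext(ctx)
		if err != nil {
			return total, err
		}
		total.Add(info)
		if encdoc == nil { // nil document signals the spans are exhausted
			return total, f.Close()
		}
	}
}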
+// keyValue is a KV store response containing the resulting core.DataStoreKey and byte array value. +type keyValue struct { + Key core.DataStoreKey + Value []byte +} + var ( _ Fetcher = (*DocumentFetcher)(nil) ) @@ -48,14 +87,35 @@ type DocumentFetcher struct { order []dsq.Order curSpanIndex int - schemaFields map[uint32]client.FieldDescription - fields []*client.FieldDescription + filter *mapper.Filter + ranFilter bool // did we run the filter + passedFilter bool // did we pass the filter + + filterFields map[uint32]client.FieldDescription + selectFields map[uint32]client.FieldDescription + + // static bitset which stores the IDs of fields + // needed for filtering. + // + // This is compared against the encdoc.filterSet which + // is a dynamic bitset that gets updated as fields are + // added to the encdoc, and cleared on reset. + // + // We compare the two bitsets to determine if we've collected + // all the necessary fields to run the filter. + // + // This is *much* more efficient for comparison than most (any?) + // other approach. + // + // When proper seek() is added, this will also be responsible + // for efficiently finding the next field to seek to. + filterSet *bitset.BitSet + + doc *encodedDocument - doc *encodedDocument - decodedDoc *client.Document initialized bool - kv *core.KeyValue + kv *keyValue kvIter iterable.Iterator kvResultsIter dsq.Results kvEnd bool @@ -65,20 +125,27 @@ type DocumentFetcher struct { // we use a parallel fetcher to be able to return the documents in the expected order. // That being lexicographically ordered dockeys. deletedDocFetcher *DocumentFetcher + + execInfo ExecInfo } // Init implements DocumentFetcher. func (df *DocumentFetcher) Init( + ctx context.Context, + txn datastore.Txn, col *client.CollectionDescription, - fields []*client.FieldDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docmapper *core.DocumentMapping, reverse bool, showDeleted bool, ) error { + df.txn = txn if col.Schema.IsEmpty() { return client.NewErrUninitializeProperty("DocumentFetcher", "Schema") } - err := df.init(col, fields, reverse) + err := df.init(col, fields, filter, docmapper, reverse) if err != nil { return err } @@ -86,8 +153,9 @@ func (df *DocumentFetcher) Init( if showDeleted { if df.deletedDocFetcher == nil { df.deletedDocFetcher = new(DocumentFetcher) + df.deletedDocFetcher.txn = txn } - return df.deletedDocFetcher.init(col, fields, reverse) + return df.deletedDocFetcher.init(col, fields, filter, docmapper, reverse) } return nil @@ -95,15 +163,22 @@ func (df *DocumentFetcher) Init( func (df *DocumentFetcher) init( col *client.CollectionDescription, - fields []*client.FieldDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docMapper *core.DocumentMapping, reverse bool, ) error { df.col = col - df.fields = fields df.reverse = reverse df.initialized = true + df.filter = filter df.isReadingDocument = false df.doc = new(encodedDocument) + df.doc.mapping = docMapper + + if df.filter != nil && docMapper == nil { + return ErrMissingMapper + } if df.kvResultsIter != nil { if err := df.kvResultsIter.Close(); err != nil { @@ -118,28 +193,52 @@ } df.kvIter = nil - df.schemaFields = make(map[uint32]client.FieldDescription) - for _, field := range col.Schema.Fields { - df.schemaFields[uint32(field.ID)] = field + df.selectFields = make(map[uint32]client.FieldDescription, len(fields)) + // if we haven't been told to get specific fields, + // get them all + var targetFields []client.FieldDescription + if len(fields) == 0 { + targetFields = df.col.Schema.Fields + } else {
targetFields = fields + } + + for _, field := range targetFields { + df.selectFields[uint32(field.ID)] = field } + + if df.filter != nil { + conditions := df.filter.ToMap(df.doc.mapping) + parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema) + if err != nil { + return err + } + df.filterFields = make(map[uint32]client.FieldDescription, len(parsedfilterFields)) + df.filterSet = bitset.New(uint(len(col.Schema.Fields))) + for _, field := range parsedfilterFields { + df.filterFields[uint32(field.ID)] = field + df.filterSet.Set(uint(field.ID)) + } + } + return nil } -func (df *DocumentFetcher) Start(ctx context.Context, txn datastore.Txn, spans core.Spans) error { - err := df.start(ctx, txn, spans, false) +func (df *DocumentFetcher) Start(ctx context.Context, spans core.Spans) error { + err := df.start(ctx, spans, false) if err != nil { return err } if df.deletedDocFetcher != nil { - return df.deletedDocFetcher.start(ctx, txn, spans, true) + return df.deletedDocFetcher.start(ctx, spans, true) } return nil } // Start implements DocumentFetcher. -func (df *DocumentFetcher) start(ctx context.Context, txn datastore.Txn, spans core.Spans, withDeleted bool) error { +func (df *DocumentFetcher) start(ctx context.Context, spans core.Spans, withDeleted bool) error { if df.col == nil { return client.NewErrUninitializeProperty("DocumentFetcher", "CollectionDescription") } @@ -176,7 +275,6 @@ func (df *DocumentFetcher) start(ctx context.Context, txn datastore.Txn, spans c } df.curSpanIndex = -1 - df.txn = txn if df.reverse { df.order = []dsq.Order{dsq.OrderByKeyDescending{}} @@ -218,39 +316,36 @@ func (df *DocumentFetcher) startNextSpan(ctx context.Context) (bool, error) { } df.curSpanIndex = nextSpanIndex - _, err = df.nextKey(ctx) + _, _, err = df.nextKey(ctx, false) return err == nil, err } -func (df *DocumentFetcher) KVEnd() bool { - return df.kvEnd -} - -func (df *DocumentFetcher) KV() *core.KeyValue { - return df.kv -} - -func (df *DocumentFetcher) NextKey(ctx context.Context) (docDone bool, err error) { - return df.nextKey(ctx) -} - -func (df *DocumentFetcher) NextKV() (iterDone bool, kv *core.KeyValue, err error) { - return df.nextKV() -} - -func (df *DocumentFetcher) ProcessKV(kv *core.KeyValue) error { - return df.processKV(kv) -} - // nextKey gets the next kv. It sets both kv and kvEnd internally. -// It returns true if the current doc is completed -func (df *DocumentFetcher) nextKey(ctx context.Context) (spanDone bool, err error) { - // get the next kv from nextKV() - spanDone, df.kv, err = df.nextKV() - // handle any internal errors - if err != nil { - return false, err +// It returns true if the current doc is completed. 
+// The first call to nextKey CANNOT have seekNext be true (ErrFailedToSeek) +func (df *DocumentFetcher) nextKey(ctx context.Context, seekNext bool) (spanDone bool, docDone bool, err error) { + // safety against seekNext on first call + if seekNext && df.kv == nil { + return false, false, ErrFailedToSeek + } + + if seekNext { + curKey := df.kv.Key + curKey.FieldId = "" // clear field so prefixEnd applies to dockey + seekKey := curKey.PrefixEnd().ToString() + spanDone, df.kv, err = df.seekKV(seekKey) + // handle any internal errors + if err != nil { + return false, false, err + } + } else { + spanDone, df.kv, err = df.nextKV() + // handle any internal errors + if err != nil { + return false, false, err + } + } + if df.kv != nil && (df.kv.Key.InstanceType != core.ValueKey && df.kv.Key.InstanceType != core.DeletedKey) { // We can only read value keys; if we escape the collection's value keys // then we must be done and can stop reading @@ -259,50 +354,103 @@ func (df *DocumentFetcher) nextKey(ctx context.Context) (spanDone bool, err erro df.kvEnd = spanDone if df.kvEnd { - _, err := df.startNextSpan(ctx) + err = df.kvResultsIter.Close() if err != nil { - return false, err + return false, false, err + } + moreSpans, err := df.startNextSpan(ctx) + if err != nil { + return false, false, err } - return true, nil + df.isReadingDocument = false + return !moreSpans, true, nil } // check if we've crossed document boundaries - if df.doc.Key != nil && df.kv.Key.DocKey != string(df.doc.Key) { + if (df.doc.key != nil && df.kv.Key.DocKey != string(df.doc.key)) || seekNext { df.isReadingDocument = false - return true, nil + return false, true, nil } - return false, nil + return false, false, nil } // nextKV is a lower-level utility compared to nextKey. The differences are as follows: // - It directly interacts with the KVIterator. // - Returns true if the entire iterator/span is exhausted // - Returns a kv pair instead of internally updating -func (df *DocumentFetcher) nextKV() (iterDone bool, kv *core.KeyValue, err error) { +func (df *DocumentFetcher) nextKV() (iterDone bool, kv *keyValue, err error) { + done, dsKey, res, err := df.nextKVRaw() + if done || err != nil { + return done, nil, err + } + + kv = &keyValue{ + Key: dsKey, + Value: res.Value, + } + return false, kv, nil +} + +// seekKV will seek through results/iterator until it reaches +// the target key, or if the target key doesn't exist, the +// next smallest key that is greater than the target. +func (df *DocumentFetcher) seekKV(key string) (bool, *keyValue, error) { + // make sure the current kv is *before* the target key + switch strings.Compare(df.kv.Key.ToString(), key) { + case 0: + // equal, we should just return the kv state + return df.kvEnd, df.kv, nil + case 1: + // greater, error + return false, nil, NewErrFailedToSeek(key, nil) + } + + for { + done, dsKey, res, err := df.nextKVRaw() + if done || err != nil { + return done, nil, err + } + + switch strings.Compare(dsKey.ToString(), key) { + case -1: + // before, so let's seek again + continue + case 0, 1: + // equal or greater (first), return a formatted kv + kv := &keyValue{ + Key: dsKey, + Value: res.Value, // @todo make lazy + } + return false, kv, nil + } + } +} +
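The seek path in nextKey above clears the field id and jumps to PrefixEnd of the current dockey; a conceptual, self-contained sketch (not defradb's implementation) of what a prefix-end computation does to a byte key:

package example

// prefixEnd returns the smallest key that sorts after every key sharing the
// prefix p, by incrementing the last byte that is not already 0xFF. This is
// a stand-in for what DataStoreKey.PrefixEnd is used for above: with the
// field id cleared, seeking to the prefix end of the dockey skips all
// remaining fields of the current document in one jump.
func prefixEnd(p []byte) []byte {
	end := append([]byte(nil), p...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xFF {
			end[i]++
			return end[:i+1]
		}
	}
	return nil // every byte is 0xFF: no finite upper bound exists
}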
+// nextKVRaw is a lower-level utility compared to nextKV. The differences are as follows: +// - It directly interacts with the KVIterator. +// - Returns true if the entire iterator/span is exhausted +// - Returns the raw datastore key and query result instead of a formatted kv pair +func (df *DocumentFetcher) nextKVRaw() (bool, core.DataStoreKey, dsq.Result, error) { res, available := df.kvResultsIter.NextSync() if !available { - return true, nil, nil + return true, core.DataStoreKey{}, res, nil } - err = res.Error + err := res.Error if err != nil { - return true, nil, err + return true, core.DataStoreKey{}, res, err } dsKey, err := core.NewDataStoreKey(res.Key) if err != nil { - return true, nil, err + return true, core.DataStoreKey{}, res, err } - kv = &core.KeyValue{ - Key: dsKey, - Value: res.Value, - } - return false, kv, nil + return false, dsKey, res, nil } // processKV continuously processes the key value pairs we've received // and step by step constructs the current encoded document -func (df *DocumentFetcher) processKV(kv *core.KeyValue) error { +func (df *DocumentFetcher) processKV(kv *keyValue) error { // skip MerkleCRDT meta-data priority key-value pair // implement here <-- // instance := kv.Key.Name() @@ -316,7 +464,22 @@ if !df.isReadingDocument { df.isReadingDocument = true df.doc.Reset() - df.doc.Key = []byte(kv.Key.DocKey) + + // re-init doc state + if df.filterSet != nil { + df.doc.filterSet = bitset.New(df.filterSet.Len()) + if df.filterSet.Test(0) { + df.doc.filterSet.Set(0) // mark dockey as set + } + } + df.doc.key = []byte(kv.Key.DocKey) + df.passedFilter = false + df.ranFilter = false + } + + if kv.Key.FieldId == core.DATASTORE_DOC_VERSION_FIELD_ID { + df.doc.schemaVersionID = string(kv.Value) + return nil } // we have to skip the object marker @@ -329,53 +492,110 @@ if err != nil { return err } - fieldDesc, exists := df.schemaFields[fieldID] + fieldDesc, exists := df.selectFields[fieldID] if !exists { - return NewErrFieldIdNotFound(fieldID) + fieldDesc, exists = df.filterFields[fieldID] + if !exists { + return nil // if we can't find this field in our sets, just ignore it + } } - // @todo: Secondary Index might not have encoded FieldIDs - // @body: Need to generalized the processKV, and overall Fetcher architecture - // to better handle dynamic use cases beyond primary indexes. If a - // secondary index is provided, we need to extract the indexed/implicit fields - // from the KV pair. - df.doc.Properties[fieldDesc] = &encProperty{ + ufid := uint(fieldID) + + property := &encProperty{ Desc: fieldDesc, Raw: kv.Value, } - // @todo: Extract Index implicit/stored keys + + if df.filterSet != nil && df.filterSet.Test(ufid) { + df.doc.filterSet.Set(ufid) + property.IsFilter = true + } + + df.execInfo.FieldsFetched++ + + df.doc.Properties[fieldDesc] = property + return nil } // FetchNext returns a raw binary encoded document. It iterates over all the relevant // keypairs from the underlying store and constructs the document.
-func (df *DocumentFetcher) FetchNext(ctx context.Context) (*encodedDocument, error) { +func (df *DocumentFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo, error) { if df.kvEnd { - return nil, nil + return nil, ExecInfo{}, nil } if df.kv == nil { - return nil, client.NewErrUninitializeProperty("DocumentFetcher", "kv") + return nil, ExecInfo{}, client.NewErrUninitializeProperty("DocumentFetcher", "kv") } // save the DocKey of the current kv pair so we can track when we cross the doc pair boundaries // keyparts := df.kv.Key.List() // key := keyparts[len(keyparts)-2] + df.execInfo.Reset() // iterate until we have collected all the necessary kv pairs for the doc // we'll know we're done when either // A) we reach the end of the iterator, or // B) we cross into the next document for { err := df.processKV(df.kv) if err != nil { - return nil, err + return nil, ExecInfo{}, err + } + + if df.filter != nil { + // only run filter if we've collected all the fields + // required for filtering. This is tracked by the bitsets. + if df.filterSet.Equal(df.doc.filterSet) { + filterDoc, err := df.doc.decodeToDocForFilter() + if err != nil { + return nil, ExecInfo{}, err + } + + df.ranFilter = true + df.passedFilter, err = mapper.RunFilter(filterDoc, df.filter) + if err != nil { + return nil, ExecInfo{}, err + } + } } - end, err := df.nextKey(ctx) + // if we don't pass the filter (i.e. it ran and did not pass) + // there's no point in collecting other select fields, + // so we seek to the next doc + spansDone, docDone, err := df.nextKey(ctx, !df.passedFilter && df.ranFilter) if err != nil { - return nil, err + return nil, ExecInfo{}, err } - if end { - return df.doc, nil + + if docDone { + df.execInfo.DocsFetched++ + if df.filter != nil { + // if we passed, return + if df.passedFilter { + return df.doc, df.execInfo, nil + } else if !df.ranFilter { // if we didn't run, run it + decodedDoc, err := df.doc.DecodeToDoc() + if err != nil { + return nil, ExecInfo{}, err + } + df.passedFilter, err = mapper.RunFilter(decodedDoc, df.filter) + if err != nil { + return nil, ExecInfo{}, err + } + if df.passedFilter { + return df.doc, df.execInfo, nil + } + } + } else { + return df.doc, df.execInfo, nil + } + + if !spansDone { + continue + } + + return nil, df.execInfo, nil } // // crossed document kv boundary? @@ -389,21 +609,21 @@ } // FetchNextDecoded implements DocumentFetcher -func (df *DocumentFetcher) FetchNextDecoded(ctx context.Context) (*client.Document, error) { - encdoc, err := df.FetchNext(ctx) +func (df *DocumentFetcher) FetchNextDecoded(ctx context.Context) (*client.Document, ExecInfo, error) { + encdoc, execInfo, err := df.FetchNext(ctx) if err != nil { - return nil, err + return nil, ExecInfo{}, err } if encdoc == nil { - return nil, nil + return nil, ExecInfo{}, nil } - df.decodedDoc, err = encdoc.Decode() + decodedDoc, err := encdoc.Decode() if err != nil { - return nil, err + return nil, ExecInfo{}, err } - return df.decodedDoc, nil + return decodedDoc, execInfo, nil } // FetchNextDoc returns the next document as a core.Doc.
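The FetchNextDoc hunk below collapses the old reverse/forward branches into a single ordering predicate over the two lexicographically sorted streams; the same rule in isolation, as a hedged sketch with illustrative names:

package example

// pickDeletedFirst mirrors the condition in the FetchNextDoc hunk below: the
// deleted-docs fetcher is consumed first only when the active fetcher is
// exhausted, or when the deleted fetcher's current dockey sorts after
// (reverse mode) or before (forward mode) the active fetcher's dockey.
func pickDeletedFirst(activeDone bool, activeKey, deletedKey string, reverse bool) bool {
	if activeDone {
		return true
	}
	if reverse {
		return deletedKey > activeKey
	}
	return deletedKey < activeKey
}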
@@ -411,10 +631,11 @@ func (df *DocumentFetcher) FetchNextDecoded(ctx context.Docume func (df *DocumentFetcher) FetchNextDoc( ctx context.Context, mapping *core.DocumentMapping, -) ([]byte, core.Doc, error) { +) ([]byte, core.Doc, ExecInfo, error) { var err error - var encdoc *encodedDocument + var encdoc EncodedDocument var status client.DocumentStatus + var resultExecInfo ExecInfo // If the deletedDocFetcher isn't nil, this means that the user requested to include the deleted documents // in the query. To keep the active and deleted docs in lexicographic order of dockeys, we use the two distinct @@ -423,22 +644,16 @@ func (df *DocumentFetcher) FetchNextDoc( if ddf != nil { // If we've reached the end of the deleted docs, we can skip to getting the next active docs. if !ddf.kvEnd { - if df.reverse { - if df.kvEnd || ddf.kv.Key.DocKey > df.kv.Key.DocKey { - encdoc, err = ddf.FetchNext(ctx) - if err != nil { - return nil, core.Doc{}, err - } - status = client.Deleted - } - } else { - if df.kvEnd || ddf.kv.Key.DocKey < df.kv.Key.DocKey { - encdoc, err = ddf.FetchNext(ctx) - if err != nil { - return nil, core.Doc{}, err - } - status = client.Deleted + if df.kvEnd || + (df.reverse && ddf.kv.Key.DocKey > df.kv.Key.DocKey) || + (!df.reverse && ddf.kv.Key.DocKey < df.kv.Key.DocKey) { + var execInfo ExecInfo + encdoc, execInfo, err = ddf.FetchNext(ctx) + if err != nil { + return nil, core.Doc{}, ExecInfo{}, err } + status = client.Deleted + resultExecInfo.Add(execInfo) } } } @@ -446,42 +661,40 @@ func (df *DocumentFetcher) FetchNextDoc( // At this point, if encdoc is nil, it means that the next document to be // returned will be from the active ones. if encdoc == nil { - encdoc, err = df.FetchNext(ctx) + var execInfo ExecInfo + encdoc, execInfo, err = df.FetchNext(ctx) if err != nil { - return nil, core.Doc{}, err + return nil, core.Doc{}, ExecInfo{}, err } + resultExecInfo.Add(execInfo) if encdoc == nil { - return nil, core.Doc{}, nil + return nil, core.Doc{}, resultExecInfo, err } status = client.Active } - doc, err := encdoc.DecodeToDoc(mapping) + doc, err := encdoc.DecodeToDoc() if err != nil { - return nil, core.Doc{}, err + return nil, core.Doc{}, ExecInfo{}, err } doc.Status = status - return encdoc.Key, doc, err + return encdoc.Key(), doc, resultExecInfo, err } // Close closes the DocumentFetcher. func (df *DocumentFetcher) Close() error { - if df.kvIter == nil { - return nil - } - - err := df.kvIter.Close() - if err != nil { - return err - } - - if df.kvResultsIter == nil { - return nil + if df.kvIter != nil { + err := df.kvIter.Close() + if err != nil { + return err + } } - err = df.kvResultsIter.Close() - if err != nil { - return err + if df.kvResultsIter != nil { + err := df.kvResultsIter.Close() + if err != nil { + return err + } } if df.deletedDocFetcher != nil { diff --git a/db/fetcher/mocks/EncodedDocument.go b/db/fetcher/mocks/EncodedDocument.go new file mode 100644 index 0000000000..23522ef1f2 --- /dev/null +++ b/db/fetcher/mocks/EncodedDocument.go @@ -0,0 +1,257 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT.
+ +package mocks + +import ( + client "github.com/sourcenetwork/defradb/client" + core "github.com/sourcenetwork/defradb/core" + + mock "github.com/stretchr/testify/mock" +) + +// EncodedDocument is an autogenerated mock type for the EncodedDocument type +type EncodedDocument struct { + mock.Mock +} + +type EncodedDocument_Expecter struct { + mock *mock.Mock +} + +func (_m *EncodedDocument) EXPECT() *EncodedDocument_Expecter { + return &EncodedDocument_Expecter{mock: &_m.Mock} +} + +// Decode provides a mock function with given fields: +func (_m *EncodedDocument) Decode() (*client.Document, error) { + ret := _m.Called() + + var r0 *client.Document + var r1 error + if rf, ok := ret.Get(0).(func() (*client.Document, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *client.Document); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.Document) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodedDocument_Decode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Decode' +type EncodedDocument_Decode_Call struct { + *mock.Call +} + +// Decode is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) Decode() *EncodedDocument_Decode_Call { + return &EncodedDocument_Decode_Call{Call: _e.mock.On("Decode")} +} + +func (_c *EncodedDocument_Decode_Call) Run(run func()) *EncodedDocument_Decode_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_Decode_Call) Return(_a0 *client.Document, _a1 error) *EncodedDocument_Decode_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EncodedDocument_Decode_Call) RunAndReturn(run func() (*client.Document, error)) *EncodedDocument_Decode_Call { + _c.Call.Return(run) + return _c +} + +// DecodeToDoc provides a mock function with given fields: +func (_m *EncodedDocument) DecodeToDoc() (core.Doc, error) { + ret := _m.Called() + + var r0 core.Doc + var r1 error + if rf, ok := ret.Get(0).(func() (core.Doc, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() core.Doc); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(core.Doc) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodedDocument_DecodeToDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeToDoc' +type EncodedDocument_DecodeToDoc_Call struct { + *mock.Call +} + +// DecodeToDoc is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) DecodeToDoc() *EncodedDocument_DecodeToDoc_Call { + return &EncodedDocument_DecodeToDoc_Call{Call: _e.mock.On("DecodeToDoc")} +} + +func (_c *EncodedDocument_DecodeToDoc_Call) Run(run func()) *EncodedDocument_DecodeToDoc_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_DecodeToDoc_Call) Return(_a0 core.Doc, _a1 error) *EncodedDocument_DecodeToDoc_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EncodedDocument_DecodeToDoc_Call) RunAndReturn(run func() (core.Doc, error)) *EncodedDocument_DecodeToDoc_Call { + _c.Call.Return(run) + return _c +} + +// Key provides a mock function with given fields: +func (_m *EncodedDocument) Key() []byte { + ret := _m.Called() + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + 
+ return r0 +} + +// EncodedDocument_Key_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Key' +type EncodedDocument_Key_Call struct { + *mock.Call +} + +// Key is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) Key() *EncodedDocument_Key_Call { + return &EncodedDocument_Key_Call{Call: _e.mock.On("Key")} +} + +func (_c *EncodedDocument_Key_Call) Run(run func()) *EncodedDocument_Key_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_Key_Call) Return(_a0 []byte) *EncodedDocument_Key_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EncodedDocument_Key_Call) RunAndReturn(run func() []byte) *EncodedDocument_Key_Call { + _c.Call.Return(run) + return _c +} + +// Reset provides a mock function with given fields: +func (_m *EncodedDocument) Reset() { + _m.Called() +} + +// EncodedDocument_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' +type EncodedDocument_Reset_Call struct { + *mock.Call +} + +// Reset is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) Reset() *EncodedDocument_Reset_Call { + return &EncodedDocument_Reset_Call{Call: _e.mock.On("Reset")} +} + +func (_c *EncodedDocument_Reset_Call) Run(run func()) *EncodedDocument_Reset_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_Reset_Call) Return() *EncodedDocument_Reset_Call { + _c.Call.Return() + return _c +} + +func (_c *EncodedDocument_Reset_Call) RunAndReturn(run func()) *EncodedDocument_Reset_Call { + _c.Call.Return(run) + return _c +} + +// SchemaVersionID provides a mock function with given fields: +func (_m *EncodedDocument) SchemaVersionID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// EncodedDocument_SchemaVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SchemaVersionID' +type EncodedDocument_SchemaVersionID_Call struct { + *mock.Call +} + +// SchemaVersionID is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) SchemaVersionID() *EncodedDocument_SchemaVersionID_Call { + return &EncodedDocument_SchemaVersionID_Call{Call: _e.mock.On("SchemaVersionID")} +} + +func (_c *EncodedDocument_SchemaVersionID_Call) Run(run func()) *EncodedDocument_SchemaVersionID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_SchemaVersionID_Call) Return(_a0 string) *EncodedDocument_SchemaVersionID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EncodedDocument_SchemaVersionID_Call) RunAndReturn(run func() string) *EncodedDocument_SchemaVersionID_Call { + _c.Call.Return(run) + return _c +} + +// NewEncodedDocument creates a new instance of EncodedDocument. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEncodedDocument(t interface { + mock.TestingT + Cleanup(func()) +}) *EncodedDocument { + mock := &EncodedDocument{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/db/fetcher/mocks/Fetcher.go b/db/fetcher/mocks/Fetcher.go new file mode 100644 index 0000000000..39f9c89c39 --- /dev/null +++ b/db/fetcher/mocks/Fetcher.go @@ -0,0 +1,370 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "github.com/sourcenetwork/defradb/client" + + core "github.com/sourcenetwork/defradb/core" + + datastore "github.com/sourcenetwork/defradb/datastore" + + fetcher "github.com/sourcenetwork/defradb/db/fetcher" + + mapper "github.com/sourcenetwork/defradb/planner/mapper" + + mock "github.com/stretchr/testify/mock" +) + +// Fetcher is an autogenerated mock type for the Fetcher type +type Fetcher struct { + mock.Mock +} + +type Fetcher_Expecter struct { + mock *mock.Mock +} + +func (_m *Fetcher) EXPECT() *Fetcher_Expecter { + return &Fetcher_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *Fetcher) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Fetcher_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type Fetcher_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *Fetcher_Expecter) Close() *Fetcher_Close_Call { + return &Fetcher_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *Fetcher_Close_Call) Run(run func()) *Fetcher_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Fetcher_Close_Call) Return(_a0 error) *Fetcher_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Fetcher_Close_Call) RunAndReturn(run func() error) *Fetcher_Close_Call { + _c.Call.Return(run) + return _c +} + +// FetchNext provides a mock function with given fields: ctx +func (_m *Fetcher) FetchNext(ctx context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error) { + ret := _m.Called(ctx) + + var r0 fetcher.EncodedDocument + var r1 fetcher.ExecInfo + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) fetcher.EncodedDocument); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fetcher.EncodedDocument) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) fetcher.ExecInfo); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(fetcher.ExecInfo) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Fetcher_FetchNext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNext' +type Fetcher_FetchNext_Call struct { + *mock.Call +} + +// FetchNext is a helper method to define mock.On call +// - ctx context.Context +func (_e *Fetcher_Expecter) FetchNext(ctx interface{}) *Fetcher_FetchNext_Call { + return &Fetcher_FetchNext_Call{Call: _e.mock.On("FetchNext", ctx)} +} + +func (_c *Fetcher_FetchNext_Call) Run(run func(ctx context.Context)) *Fetcher_FetchNext_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Fetcher_FetchNext_Call) 
Return(_a0 fetcher.EncodedDocument, _a1 fetcher.ExecInfo, _a2 error) *Fetcher_FetchNext_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error)) *Fetcher_FetchNext_Call { + _c.Call.Return(run) + return _c +} + +// FetchNextDecoded provides a mock function with given fields: ctx +func (_m *Fetcher) FetchNextDecoded(ctx context.Context) (*client.Document, fetcher.ExecInfo, error) { + ret := _m.Called(ctx) + + var r0 *client.Document + var r1 fetcher.ExecInfo + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (*client.Document, fetcher.ExecInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *client.Document); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.Document) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) fetcher.ExecInfo); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(fetcher.ExecInfo) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Fetcher_FetchNextDecoded_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDecoded' +type Fetcher_FetchNextDecoded_Call struct { + *mock.Call +} + +// FetchNextDecoded is a helper method to define mock.On call +// - ctx context.Context +func (_e *Fetcher_Expecter) FetchNextDecoded(ctx interface{}) *Fetcher_FetchNextDecoded_Call { + return &Fetcher_FetchNextDecoded_Call{Call: _e.mock.On("FetchNextDecoded", ctx)} +} + +func (_c *Fetcher_FetchNextDecoded_Call) Run(run func(ctx context.Context)) *Fetcher_FetchNextDecoded_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Fetcher_FetchNextDecoded_Call) Return(_a0 *client.Document, _a1 fetcher.ExecInfo, _a2 error) *Fetcher_FetchNextDecoded_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Fetcher_FetchNextDecoded_Call) RunAndReturn(run func(context.Context) (*client.Document, fetcher.ExecInfo, error)) *Fetcher_FetchNextDecoded_Call { + _c.Call.Return(run) + return _c +} + +// FetchNextDoc provides a mock function with given fields: ctx, mapping +func (_m *Fetcher) FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error) { + ret := _m.Called(ctx, mapping) + + var r0 []byte + var r1 core.Doc + var r2 fetcher.ExecInfo + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)); ok { + return rf(ctx, mapping) + } + if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) []byte); ok { + r0 = rf(ctx, mapping) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *core.DocumentMapping) core.Doc); ok { + r1 = rf(ctx, mapping) + } else { + r1 = ret.Get(1).(core.Doc) + } + + if rf, ok := ret.Get(2).(func(context.Context, *core.DocumentMapping) fetcher.ExecInfo); ok { + r2 = rf(ctx, mapping) + } else { + r2 = ret.Get(2).(fetcher.ExecInfo) + } + + if rf, ok := ret.Get(3).(func(context.Context, *core.DocumentMapping) error); ok { + r3 = rf(ctx, mapping) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + +// Fetcher_FetchNextDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDoc' +type 
Fetcher_FetchNextDoc_Call struct { + *mock.Call +} + +// FetchNextDoc is a helper method to define mock.On call +// - ctx context.Context +// - mapping *core.DocumentMapping +func (_e *Fetcher_Expecter) FetchNextDoc(ctx interface{}, mapping interface{}) *Fetcher_FetchNextDoc_Call { + return &Fetcher_FetchNextDoc_Call{Call: _e.mock.On("FetchNextDoc", ctx, mapping)} +} + +func (_c *Fetcher_FetchNextDoc_Call) Run(run func(ctx context.Context, mapping *core.DocumentMapping)) *Fetcher_FetchNextDoc_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*core.DocumentMapping)) + }) + return _c +} + +func (_c *Fetcher_FetchNextDoc_Call) Return(_a0 []byte, _a1 core.Doc, _a2 fetcher.ExecInfo, _a3 error) *Fetcher_FetchNextDoc_Call { + _c.Call.Return(_a0, _a1, _a2, _a3) + return _c +} + +func (_c *Fetcher_FetchNextDoc_Call) RunAndReturn(run func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)) *Fetcher_FetchNextDoc_Call { + _c.Call.Return(run) + return _c +} + +// Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted +func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { + ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { + r0 = rf(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Fetcher_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' +type Fetcher_Init_Call struct { + *mock.Call +} + +// Init is a helper method to define mock.On call +// - ctx context.Context +// - txn datastore.Txn +// - col *client.CollectionDescription +// - fields []client.FieldDescription +// - filter *mapper.Filter +// - docmapper *core.DocumentMapping +// - reverse bool +// - showDeleted bool +func (_e *Fetcher_Expecter) Init(ctx interface{}, txn interface{}, col interface{}, fields interface{}, filter interface{}, docmapper interface{}, reverse interface{}, showDeleted interface{}) *Fetcher_Init_Call { + return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)} +} + +func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(*client.CollectionDescription), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) + }) + return _c +} + +func (_c *Fetcher_Init_Call) Return(_a0 error) *Fetcher_Init_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given 
fields: ctx, spans +func (_m *Fetcher) Start(ctx context.Context, spans core.Spans) error { + ret := _m.Called(ctx, spans) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.Spans) error); ok { + r0 = rf(ctx, spans) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Fetcher_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type Fetcher_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +// - spans core.Spans +func (_e *Fetcher_Expecter) Start(ctx interface{}, spans interface{}) *Fetcher_Start_Call { + return &Fetcher_Start_Call{Call: _e.mock.On("Start", ctx, spans)} +} + +func (_c *Fetcher_Start_Call) Run(run func(ctx context.Context, spans core.Spans)) *Fetcher_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(core.Spans)) + }) + return _c +} + +func (_c *Fetcher_Start_Call) Return(_a0 error) *Fetcher_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Fetcher_Start_Call) RunAndReturn(run func(context.Context, core.Spans) error) *Fetcher_Start_Call { + _c.Call.Return(run) + return _c +} + +// NewFetcher creates a new instance of Fetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFetcher(t interface { + mock.TestingT + Cleanup(func()) +}) *Fetcher { + mock := &Fetcher{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/db/fetcher/mocks/utils.go b/db/fetcher/mocks/utils.go new file mode 100644 index 0000000000..3ffe12fce2 --- /dev/null +++ b/db/fetcher/mocks/utils.go @@ -0,0 +1,41 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package mocks + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/db/fetcher" + + "github.com/stretchr/testify/mock" +) + +func NewStubbedFetcher(t *testing.T) *Fetcher { + f := NewFetcher(t) + f.EXPECT().Init( + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Maybe().Return(nil) + f.EXPECT().Start(mock.Anything, mock.Anything).Maybe().Return(nil) + f.EXPECT().FetchNext(mock.Anything).Maybe().Return(nil, fetcher.ExecInfo{}, nil) + f.EXPECT().FetchNextDoc(mock.Anything, mock.Anything).Maybe(). + Return(NewEncodedDocument(t), core.Doc{}, fetcher.ExecInfo{}, nil) + f.EXPECT().FetchNextDecoded(mock.Anything).Maybe().Return(&client.Document{}, fetcher.ExecInfo{}, nil) + f.EXPECT().Close().Maybe().Return(nil) + return f +}
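A brief usage sketch (not part of the diff) of how the generated expecter API above reads in a test; the expectations and returned values here are placeholders rather than anything a real fetcher would produce:

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/sourcenetwork/defradb/core"
	"github.com/sourcenetwork/defradb/db/fetcher/mocks"
)

// TestStubbedFetcherLifecycle exercises the typed expecter calls; NewFetcher
// registers a cleanup that asserts all expectations were met.
func TestStubbedFetcherLifecycle(t *testing.T) {
	f := mocks.NewFetcher(t)
	f.EXPECT().Start(mock.Anything, mock.Anything).Return(nil)
	f.EXPECT().Close().Return(nil)

	if err := f.Start(context.Background(), core.Spans{}); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
}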
func (vf *VersionedFetcher) Init(
+	ctx context.Context,
+	txn datastore.Txn,
 	col *client.CollectionDescription,
-	fields []*client.FieldDescription,
+	fields []client.FieldDescription,
+	filter *mapper.Filter,
+	docmapper *core.DocumentMapping,
 	reverse bool,
 	showDeleted bool,
 ) error {
 	vf.col = col
 	vf.queuedCids = list.New()
 	vf.mCRDTs = make(map[uint32]crdt.MerkleCRDT)
+	vf.txn = txn
+
+	// create store
+	root := memory.NewDatastore(ctx)
+	vf.root = root
+
+	var err error
+	vf.store, err = datastore.NewTxnFrom(
+		ctx,
+		vf.root,
+		false,
+	) // we're going to discard and nuke this later
+	if err != nil {
+		return err
+	}
 
 	// run the DF init, VersionedFetchers only supports the Primary (0) index
 	vf.DocumentFetcher = new(DocumentFetcher)
-	return vf.DocumentFetcher.Init(col, fields, reverse, showDeleted)
+	return vf.DocumentFetcher.Init(ctx, vf.store, col, fields, filter, docmapper, reverse, showDeleted)
 }
 
 // Start serializes the correct state according to the Key and CID.
-func (vf *VersionedFetcher) Start(ctx context.Context, txn datastore.Txn, spans core.Spans) error {
+func (vf *VersionedFetcher) Start(ctx context.Context, spans core.Spans) error {
 	if vf.col == nil {
 		return client.NewErrUninitializeProperty("VersionedFetcher", "CollectionDescription")
 	}
@@ -140,29 +160,15 @@ func (vf *VersionedFetcher) Start(ctx context.Context, txn datastore.Txn, spans
 		return NewErrFailedToDecodeCIDForVFetcher(err)
 	}
 
-	vf.txn = txn
 	vf.ctx = ctx
 	vf.key = dk
 	vf.version = c
 
-	// create store
-	root := memory.NewDatastore(ctx)
-	vf.root = root
-
-	vf.store, err = datastore.NewTxnFrom(
-		ctx,
-		vf.root,
-		false,
-	) // were going to discard and nuke this later
-	if err != nil {
-		return err
-	}
-
 	if err := vf.seekTo(vf.version); err != nil {
 		return NewErrFailedToSeek(c, err)
 	}
 
-	return vf.DocumentFetcher.Start(ctx, vf.store, core.Spans{})
+	return vf.DocumentFetcher.Start(ctx, core.Spans{})
 }
 
 // Rootstore returns the rootstore of the VersionedFetcher.
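Reviewer note: the hunks above move the transaction and the throwaway in-memory store from `Start` into `Init`, so a fetcher now receives its txn, filter, and document mapping up front, and `Start` only receives the spans. A minimal sketch of the new call order, mirroring the updated tests in `db/fetcher_test.go` below; `scanAll` is a hypothetical helper, not part of this diff:

```go
// scanAll shows the refactored fetcher lifecycle: Init carries the txn,
// Start only carries the spans.
func scanAll(ctx context.Context, txn datastore.Txn, col client.Collection) error {
	df := new(fetcher.DocumentFetcher)
	desc := col.Description()

	// Txn, filter, and document mapping are now wired in during Init
	// (nil filter/mapping mirrors the updated tests).
	err := df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false)
	if err != nil {
		return err
	}

	// Start no longer takes a txn; it only needs the spans to iterate.
	return df.Start(ctx, core.Spans{})
}
```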
@@ -191,7 +197,7 @@ func (vf *VersionedFetcher) SeekTo(ctx context.Context, c cid.Cid) error { return err } - return vf.DocumentFetcher.Start(ctx, vf.store, core.Spans{}) + return vf.DocumentFetcher.Start(ctx, core.Spans{}) } // seekTo seeks to the given CID version by stepping through the CRDT state graph from the beginning diff --git a/db/fetcher_test.go b/db/fetcher_test.go index af6613373f..209fb7a8c3 100644 --- a/db/fetcher_test.go +++ b/db/fetcher_test.go @@ -15,9 +15,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" ) @@ -50,10 +52,10 @@ func newTestCollectionDescription() client.CollectionDescription { } } -func newTestFetcher() (*fetcher.DocumentFetcher, error) { +func newTestFetcher(ctx context.Context, txn datastore.Txn) (*fetcher.DocumentFetcher, error) { df := new(fetcher.DocumentFetcher) desc := newTestCollectionDescription() - err := df.Init(&desc, nil, false, false) + err := df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) if err != nil { return nil, err } @@ -61,7 +63,7 @@ func newTestFetcher() (*fetcher.DocumentFetcher, error) { } func TestFetcherInit(t *testing.T) { - _, err := newTestFetcher() + _, err := newTestFetcher(context.Background(), nil) assert.NoError(t, err) } @@ -77,27 +79,17 @@ func TestFetcherStart(t *testing.T) { t.Error(err) return } - df, err := newTestFetcher() + df, err := newTestFetcher(ctx, txn) assert.NoError(t, err) - err = df.Start(ctx, txn, core.Spans{}) + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) } func TestFetcherStartWithoutInit(t *testing.T) { ctx := context.Background() - db, err := newMemoryDB(ctx) - if err != nil { - t.Error(err) - return - } - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } df := new(fetcher.DocumentFetcher) - err = df.Start(ctx, txn, core.Spans{}) + err := df.Start(ctx, core.Spans{}) assert.Error(t, err) } @@ -133,13 +125,13 @@ func TestFetcherGetAllPrimaryIndexEncodedDocSingle(t *testing.T) { df := new(fetcher.DocumentFetcher) desc := col.Description() - err = df.Init(&desc, nil, false, false) + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) assert.NoError(t, err) - err = df.Start(ctx, txn, core.Spans{}) + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - encdoc, err := df.FetchNext(ctx) + encdoc, _, err := df.FetchNext(ctx) assert.NoError(t, err) assert.NotNil(t, encdoc) } @@ -178,16 +170,16 @@ func TestFetcherGetAllPrimaryIndexEncodedDocMultiple(t *testing.T) { df := new(fetcher.DocumentFetcher) desc := col.Description() - err = df.Init(&desc, nil, false, false) + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) assert.NoError(t, err) - err = df.Start(ctx, txn, core.Spans{}) + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - encdoc, err := df.FetchNext(ctx) + encdoc, _, err := df.FetchNext(ctx) assert.NoError(t, err) assert.NotNil(t, encdoc) - encdoc, err = df.FetchNext(ctx) + encdoc, _, err = df.FetchNext(ctx) assert.NoError(t, err) assert.NotNil(t, encdoc) } @@ -208,23 +200,23 @@ func TestFetcherGetAllPrimaryIndexDecodedSingle(t *testing.T) { err = col.Save(ctx, doc) assert.NoError(t, err) - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(&desc, nil, false, false) - assert.NoError(t, 
err) - txn, err := db.NewTxn(ctx, true) if err != nil { t.Error(err) return } - err = df.Start(ctx, txn, core.Spans{}) + df := new(fetcher.DocumentFetcher) + desc := col.Description() + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) + assert.NoError(t, err) + + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - ddoc, err := df.FetchNextDecoded(ctx) + ddoc, _, err := df.FetchNextDecoded(ctx) assert.NoError(t, err) - assert.NotNil(t, ddoc) + require.NotNil(t, ddoc) // value check name, err := ddoc.Get("Name") @@ -260,21 +252,21 @@ func TestFetcherGetAllPrimaryIndexDecodedMultiple(t *testing.T) { err = col.Save(ctx, doc) assert.NoError(t, err) - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(&desc, nil, false, false) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, true) if err != nil { t.Error(err) return } - err = df.Start(ctx, txn, core.Spans{}) + df := new(fetcher.DocumentFetcher) + desc := col.Description() + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) + assert.NoError(t, err) + + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - ddoc, err := df.FetchNextDecoded(ctx) + ddoc, _, err := df.FetchNextDecoded(ctx) assert.NoError(t, err) assert.NotNil(t, ddoc) @@ -287,7 +279,7 @@ func TestFetcherGetAllPrimaryIndexDecodedMultiple(t *testing.T) { assert.Equal(t, "John", name) assert.Equal(t, uint64(21), age) - ddoc, err = df.FetchNextDecoded(ctx) + ddoc, _, err = df.FetchNextDecoded(ctx) assert.NoError(t, err) assert.NotNil(t, ddoc) @@ -317,9 +309,15 @@ func TestFetcherGetOnePrimaryIndexDecoded(t *testing.T) { err = col.Save(ctx, doc) assert.NoError(t, err) + txn, err := db.NewTxn(ctx, true) + if err != nil { + t.Error(err) + return + } + df := new(fetcher.DocumentFetcher) desc := col.Description() - err = df.Init(&desc, nil, false, false) + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) assert.NoError(t, err) // create a span for our document we wish to find @@ -328,16 +326,10 @@ func TestFetcherGetOnePrimaryIndexDecoded(t *testing.T) { core.NewSpan(docKey, docKey.PrefixEnd()), ) - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - - err = df.Start(ctx, txn, spans) + err = df.Start(ctx, spans) assert.NoError(t, err) - ddoc, err := df.FetchNextDecoded(ctx) + ddoc, _, err := df.FetchNextDecoded(ctx) assert.NoError(t, err) assert.NotNil(t, ddoc) diff --git a/db/index.go b/db/index.go new file mode 100644 index 0000000000..2c5ea2d6b2 --- /dev/null +++ b/db/index.go @@ -0,0 +1,237 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
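Reviewer note: the `db/fetcher_test.go` changes above also pick up the new return shape: `FetchNext`, `FetchNextDoc`, and `FetchNextDecoded` now return an additional `fetcher.ExecInfo` value alongside the document and error. A hedged sketch of a consuming loop; `drainFetcher` is a hypothetical helper, and the nil-means-exhausted convention is an assumption suggested by the tests:

```go
// drainFetcher consumes documents using the new three-value FetchNext.
func drainFetcher(ctx context.Context, df *fetcher.DocumentFetcher) error {
	for {
		encdoc, _, err := df.FetchNext(ctx) // middle value is execution info, unused here
		if err != nil {
			return err
		}
		if encdoc == nil {
			return nil // assumption: a nil document signals exhaustion
		}
		// process encdoc...
	}
}
```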
+
+package db
+
+import (
+	"context"
+	"time"
+
+	ds "github.com/ipfs/go-datastore"
+
+	"github.com/ipfs/go-datastore/query"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/datastore"
+	"github.com/sourcenetwork/defradb/errors"
+)
+
+// CollectionIndex is an interface for collection indexes.
+// It abstracts away common index functionality to be implemented
+// by different index types: non-unique, unique, and composite.
+type CollectionIndex interface {
+	// Save indexes a document by storing it
+	Save(context.Context, datastore.Txn, *client.Document) error
+	// Update updates an existing document in the index
+	Update(context.Context, datastore.Txn, *client.Document, *client.Document) error
+	// RemoveAll removes all documents from the index
+	RemoveAll(context.Context, datastore.Txn) error
+	// Name returns the name of the index
+	Name() string
+	// Description returns the description of the index
+	Description() client.IndexDescription
+}
+
+func canConvertIndexFieldValue[T any](val any) bool {
+	_, ok := val.(T)
+	return ok
+}
+
+func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool {
+	switch kind {
+	case client.FieldKind_STRING:
+		return canConvertIndexFieldValue[string]
+	case client.FieldKind_INT:
+		return canConvertIndexFieldValue[int64]
+	case client.FieldKind_FLOAT:
+		return canConvertIndexFieldValue[float64]
+	case client.FieldKind_BOOL:
+		return canConvertIndexFieldValue[bool]
+	case client.FieldKind_DATETIME:
+		return func(val any) bool {
+			timeStrVal, ok := val.(string)
+			if !ok {
+				return false
+			}
+			_, err := time.Parse(time.RFC3339, timeStrVal)
+			return err == nil
+		}
+	default:
+		return nil
+	}
+}
+
+func getFieldValidateFunc(kind client.FieldKind) (func(any) bool, error) {
+	validateFunc := getValidateIndexFieldFunc(kind)
+	if validateFunc == nil {
+		return nil, NewErrUnsupportedIndexFieldType(kind)
+	}
+	return validateFunc, nil
+}
+
+// NewCollectionIndex creates a new collection index
+func NewCollectionIndex(
+	collection client.Collection,
+	desc client.IndexDescription,
+) (CollectionIndex, error) {
+	if len(desc.Fields) == 0 {
+		return nil, NewErrIndexDescHasNoFields(desc)
+	}
+	index := &collectionSimpleIndex{collection: collection, desc: desc}
+	schema := collection.Description().Schema
+	fieldID := client.FieldID(schema.GetFieldKey(desc.Fields[0].Name))
+	field, foundField := collection.Description().GetFieldByID(fieldID)
+	if fieldID == client.FieldID(0) || !foundField {
+		return nil, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name)
+	}
+	var e error
+	index.fieldDesc = field
+	index.validateFieldFunc, e = getFieldValidateFunc(field.Kind)
+	return index, e
+}
+
+// collectionSimpleIndex is a non-unique index that indexes documents by a single field.
+// Single-field indexes store values only in ascending order.
+type collectionSimpleIndex struct {
+	collection        client.Collection
+	desc              client.IndexDescription
+	validateFieldFunc func(any) bool
+	fieldDesc         client.FieldDescription
+}
+
+var _ CollectionIndex = (*collectionSimpleIndex)(nil)
+
+func (i *collectionSimpleIndex) getDocumentsIndexKey(
+	doc *client.Document,
+) (core.IndexDataStoreKey, error) {
+	fieldValue, err := i.getDocFieldValue(doc)
+	if err != nil {
+		return core.IndexDataStoreKey{}, err
+	}
+
+	indexDataStoreKey := core.IndexDataStoreKey{}
+	indexDataStoreKey.CollectionID = i.collection.ID()
+	indexDataStoreKey.IndexID = i.desc.ID
+	indexDataStoreKey.FieldValues = [][]byte{fieldValue, []byte(doc.Key().String())}
+	return indexDataStoreKey, nil
+}
+
+func (i *collectionSimpleIndex) getDocFieldValue(doc *client.Document) ([]byte, error) {
+	// collectionSimpleIndex only supports single-field indexes, so we
+	// can safely access the first field
+	indexedFieldName := i.desc.Fields[0].Name
+	fieldVal, err := doc.GetValue(indexedFieldName)
+	if err != nil {
+		if errors.Is(err, client.ErrFieldNotExist) {
+			return client.NewCBORValue(client.LWW_REGISTER, nil).Bytes()
+		} else {
+			return nil, err
+		}
+	}
+	writeableVal, ok := fieldVal.(client.WriteableValue)
+	if !ok || !i.validateFieldFunc(fieldVal.Value()) {
+		return nil, NewErrInvalidFieldValue(i.fieldDesc.Kind, writeableVal)
+	}
+	return writeableVal.Bytes()
+}
+
+// Save indexes a document by storing the indexed field value.
+func (i *collectionSimpleIndex) Save(
+	ctx context.Context,
+	txn datastore.Txn,
+	doc *client.Document,
+) error {
+	key, err := i.getDocumentsIndexKey(doc)
+	if err != nil {
+		return err
+	}
+	err = txn.Datastore().Put(ctx, key.ToDS(), []byte{})
+	if err != nil {
+		return NewErrFailedToStoreIndexedField(key.ToDS().String(), err)
+	}
+	return nil
+}
+
+// Update updates indexed field values of an existing document.
+// It removes the old document from the index and adds the new one.
+func (i *collectionSimpleIndex) Update(
+	ctx context.Context,
+	txn datastore.Txn,
+	oldDoc *client.Document,
+	newDoc *client.Document,
+) error {
+	key, err := i.getDocumentsIndexKey(oldDoc)
+	if err != nil {
+		return err
+	}
+	err = txn.Datastore().Delete(ctx, key.ToDS())
+	if err != nil {
+		return err
+	}
+	return i.Save(ctx, txn, newDoc)
+}
+
+func fetchKeysForPrefix(
+	ctx context.Context,
+	prefix string,
+	storage ds.Read,
+) ([]ds.Key, error) {
+	q, err := storage.Query(ctx, query.Query{Prefix: prefix})
+	if err != nil {
+		return nil, err
+	}
+
+	keys := make([]ds.Key, 0)
+	for res := range q.Next() {
+		if res.Error != nil {
+			_ = q.Close()
+			return nil, res.Error
+		}
+		keys = append(keys, ds.NewKey(res.Key))
+	}
+	if err = q.Close(); err != nil {
+		return nil, err
+	}
+
+	return keys, nil
+}
+
+// RemoveAll removes all artifacts of the index from the storage, i.e. all index
+// field values for all documents.
+func (i *collectionSimpleIndex) RemoveAll(ctx context.Context, txn datastore.Txn) error { + prefixKey := core.IndexDataStoreKey{} + prefixKey.CollectionID = i.collection.ID() + prefixKey.IndexID = i.desc.ID + + keys, err := fetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) + if err != nil { + return err + } + + for _, key := range keys { + err := txn.Datastore().Delete(ctx, key) + if err != nil { + return NewCanNotDeleteIndexedField(err) + } + } + + return nil +} + +// Name returns the name of the index +func (i *collectionSimpleIndex) Name() string { + return i.desc.Name +} + +// Description returns the description of the index +func (i *collectionSimpleIndex) Description() client.IndexDescription { + return i.desc +} diff --git a/db/index_test.go b/db/index_test.go new file mode 100644 index 0000000000..dce7e65bb4 --- /dev/null +++ b/db/index_test.go @@ -0,0 +1,1424 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "testing" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/datastore/mocks" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/request/graphql/schema" +) + +const ( + usersColName = "Users" + productsColName = "Products" + + usersNameFieldName = "name" + usersAgeFieldName = "age" + usersWeightFieldName = "weight" + + productsIDFieldName = "id" + productsPriceFieldName = "price" + productsCategoryFieldName = "category" + productsAvailableFieldName = "available" + + testUsersColIndexName = "user_name" + testUsersColIndexAge = "user_age" + testUsersColIndexWeight = "user_weight" + + userColVersionID = "bafkreiefzlx2xsfaxixs24hcqwwqpa3nuqbutkapasymk3d5v4fxa4rlhy" +) + +type indexTestFixture struct { + ctx context.Context + db *implicitTxnDB + txn datastore.Txn + users *collection + t *testing.T +} + +func getUsersCollectionDesc() client.CollectionDescription { + return client.CollectionDescription{ + Name: usersColName, + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: usersNameFieldName, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: usersAgeFieldName, + Kind: client.FieldKind_INT, + Typ: client.LWW_REGISTER, + }, + { + Name: usersWeightFieldName, + Kind: client.FieldKind_FLOAT, + Typ: client.LWW_REGISTER, + }, + }, + }, + } +} + +func getProductsCollectionDesc() client.CollectionDescription { + return client.CollectionDescription{ + Name: productsColName, + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: productsIDFieldName, + Kind: client.FieldKind_INT, + Typ: client.LWW_REGISTER, + }, + { + Name: productsPriceFieldName, + Kind: client.FieldKind_FLOAT, + Typ: client.LWW_REGISTER, + }, + { + 
Name: productsCategoryFieldName,
+					Kind: client.FieldKind_STRING,
+					Typ:  client.LWW_REGISTER,
+				},
+				{
+					Name: productsAvailableFieldName,
+					Kind: client.FieldKind_BOOL,
+					Typ:  client.LWW_REGISTER,
+				},
+			},
+		},
+	}
+}
+
+func newIndexTestFixtureBare(t *testing.T) *indexTestFixture {
+	ctx := context.Background()
+	db, err := newMemoryDB(ctx)
+	require.NoError(t, err)
+	txn, err := db.NewTxn(ctx, false)
+	require.NoError(t, err)
+
+	return &indexTestFixture{
+		ctx: ctx,
+		db:  db,
+		txn: txn,
+		t:   t,
+	}
+}
+
+func newIndexTestFixture(t *testing.T) *indexTestFixture {
+	f := newIndexTestFixtureBare(t)
+	f.users = f.createCollection(getUsersCollectionDesc())
+	return f
+}
+
+func (f *indexTestFixture) createCollectionIndex(
+	desc client.IndexDescription,
+) (client.IndexDescription, error) {
+	return f.createCollectionIndexFor(f.users.Name(), desc)
+}
+
+func getUsersIndexDescOnName() client.IndexDescription {
+	return client.IndexDescription{
+		Name: testUsersColIndexName,
+		Fields: []client.IndexedFieldDescription{
+			{Name: usersNameFieldName, Direction: client.Ascending},
+		},
+	}
+}
+
+func getUsersIndexDescOnAge() client.IndexDescription {
+	return client.IndexDescription{
+		Name: testUsersColIndexAge,
+		Fields: []client.IndexedFieldDescription{
+			{Name: usersAgeFieldName, Direction: client.Ascending},
+		},
+	}
+}
+
+func getUsersIndexDescOnWeight() client.IndexDescription {
+	return client.IndexDescription{
+		Name: testUsersColIndexWeight,
+		Fields: []client.IndexedFieldDescription{
+			{Name: usersWeightFieldName, Direction: client.Ascending},
+		},
+	}
+}
+
+func getProductsIndexDescOnCategory() client.IndexDescription {
+	return client.IndexDescription{
+		Name: testUsersColIndexAge,
+		Fields: []client.IndexedFieldDescription{
+			{Name: productsCategoryFieldName, Direction: client.Ascending},
+		},
+	}
+}
+
+func (f *indexTestFixture) createUserCollectionIndexOnName() client.IndexDescription {
+	newDesc, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName())
+	require.NoError(f.t, err)
+	f.commitTxn()
+	return newDesc
+}
+
+func (f *indexTestFixture) createUserCollectionIndexOnAge() client.IndexDescription {
+	newDesc, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnAge())
+	require.NoError(f.t, err)
+	f.commitTxn()
+	return newDesc
+}
+
+func (f *indexTestFixture) dropIndex(colName, indexName string) error {
+	return f.db.dropCollectionIndex(f.ctx, f.txn, colName, indexName)
+}
+
+func (f *indexTestFixture) countIndexPrefixes(colName, indexName string) int {
+	prefix := core.NewCollectionIndexKey(colName, indexName)
+	q, err := f.txn.Systemstore().Query(f.ctx, query.Query{
+		Prefix: prefix.ToString(),
+	})
+	assert.NoError(f.t, err)
+	defer func() {
+		err := q.Close()
+		assert.NoError(f.t, err)
+	}()
+
+	count := 0
+	for res := range q.Next() {
+		if res.Error != nil {
+			assert.NoError(f.t, res.Error)
+		}
+		count++
+	}
+	return count
+}
+
+func (f *indexTestFixture) commitTxn() {
+	err := f.txn.Commit(f.ctx)
+	require.NoError(f.t, err)
+	txn, err := f.db.NewTxn(f.ctx, false)
+	require.NoError(f.t, err)
+	f.txn = txn
+}
+
+func (f *indexTestFixture) createCollectionIndexFor(
+	collectionName string,
+	desc client.IndexDescription,
+) (client.IndexDescription, error) {
+	return f.db.createCollectionIndex(f.ctx, f.txn, collectionName, desc)
+}
+
+func (f *indexTestFixture) getAllIndexes() (map[client.CollectionName][]client.IndexDescription, error) {
+	return f.db.getAllIndexes(f.ctx, f.txn)
+}
+
+func (f *indexTestFixture) 
getCollectionIndexes(colName string) ([]client.IndexDescription, error) { + return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colName) +} + +func (f *indexTestFixture) createCollection( + desc client.CollectionDescription, +) *collection { + col, err := f.db.createCollection(f.ctx, f.txn, desc) + assert.NoError(f.t, err) + err = f.txn.Commit(f.ctx) + assert.NoError(f.t, err) + f.txn, err = f.db.NewTxn(f.ctx, false) + assert.NoError(f.t, err) + return col.(*collection) +} + +func TestCreateIndex_IfFieldsIsEmpty_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + _, err := f.createCollectionIndex(client.IndexDescription{ + Name: "some_index_name", + }) + assert.EqualError(t, err, errIndexMissingFields) +} + +func TestCreateIndex_IfIndexDescriptionIDIsNotZero_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + for _, id := range []uint32{1, 20, 999} { + desc := client.IndexDescription{ + Name: "some_index_name", + ID: id, + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } + _, err := f.createCollectionIndex(desc) + assert.ErrorIs(t, err, NewErrNonZeroIndexIDProvided(0)) + } +} + +func TestCreateIndex_IfValidInput_CreateIndex(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } + resultDesc, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + assert.Equal(t, desc.Name, resultDesc.Name) + assert.Equal(t, desc.Fields, resultDesc.Fields) +} + +func TestCreateIndex_IfFieldNameIsEmpty_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{ + {Name: "", Direction: client.Ascending}, + }, + } + _, err := f.createCollectionIndex(desc) + assert.EqualError(t, err, errIndexFieldMissingName) +} + +func TestCreateIndex_IfFieldHasNoDirection_DefaultToAsc(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + newDesc, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + assert.Equal(t, client.Ascending, newDesc.Fields[0].Direction) +} + +func TestCreateIndex_IfNameIsNotSpecified_Generate(t *testing.T) { + f := newIndexTestFixtureBare(t) + colDesc := getUsersCollectionDesc() + const colName = "UsErS" + const fieldName = "NaMe" + colDesc.Name = colName + colDesc.Schema.Name = colName // Which one should we use? 
+ colDesc.Schema.Fields[1].Name = fieldName + f.users = f.createCollection(colDesc) + + desc := client.IndexDescription{ + Name: "", + Fields: []client.IndexedFieldDescription{ + {Name: fieldName, Direction: client.Ascending}, + }, + } + + newDesc, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + assert.Equal(t, colName+"_"+fieldName+"_ASC", newDesc.Name) +} + +func TestCreateIndex_IfSingleFieldInDescOrder_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Descending}, + }, + } + _, err := f.createCollectionIndex(desc) + assert.EqualError(t, err, errIndexSingleFieldWrongDirection) +} + +func TestCreateIndex_IfIndexWithNameAlreadyExists_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + name := "some_index_name" + desc1 := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + desc2 := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersAgeFieldName}}, + } + _, err := f.createCollectionIndex(desc1) + assert.NoError(t, err) + _, err = f.createCollectionIndex(desc2) + assert.ErrorIs(t, err, NewErrIndexWithNameAlreadyExists(name)) +} + +func TestCreateIndex_IfGeneratedNameMatchesExisting_AddIncrement(t *testing.T) { + f := newIndexTestFixture(t) + + name := usersColName + "_" + usersAgeFieldName + "_ASC" + desc1 := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + desc2 := client.IndexDescription{ + Name: name + "_2", + Fields: []client.IndexedFieldDescription{{Name: usersWeightFieldName}}, + } + desc3 := client.IndexDescription{ + Name: "", + Fields: []client.IndexedFieldDescription{{Name: usersAgeFieldName}}, + } + _, err := f.createCollectionIndex(desc1) + assert.NoError(t, err) + _, err = f.createCollectionIndex(desc2) + assert.NoError(t, err) + newDesc3, err := f.createCollectionIndex(desc3) + assert.NoError(t, err) + assert.Equal(t, name+"_3", newDesc3.Name) +} + +func TestCreateIndex_ShouldSaveToSystemStorage(t *testing.T) { + f := newIndexTestFixture(t) + + name := "users_age_ASC" + desc := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + _, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + + key := core.NewCollectionIndexKey(f.users.Name(), name) + data, err := f.txn.Systemstore().Get(f.ctx, key.ToDS()) + assert.NoError(t, err) + var deserialized client.IndexDescription + err = json.Unmarshal(data, &deserialized) + assert.NoError(t, err) + desc.ID = 1 + assert.Equal(t, desc, deserialized) +} + +func TestCreateIndex_IfStorageFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + ExpectedError error + GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter + AlterDescription func(desc *client.IndexDescription) + }{ + { + Name: "call Has() for custom index name", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Has(mock.Anything, mock.Anything).Unset() + store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) + return store + }, + AlterDescription: func(desc *client.IndexDescription) {}, + }, + { + Name: "call Has() for generated index name", + ExpectedError: testErr, + GetMockSystemstore: func(t 
*testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Has(mock.Anything, mock.Anything).Unset() + store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) + return store + }, + AlterDescription: func(desc *client.IndexDescription) { + desc.Name = "" + }, + }, + { + Name: "fails to store index description", + ExpectedError: NewErrInvalidStoredIndex(nil), + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Unset() + key := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) + store.EXPECT().Put(mock.Anything, key.ToDS(), mock.Anything).Return(testErr) + return store + }, + AlterDescription: func(desc *client.IndexDescription) {}, + }, + } + + for _, testCase := range testCases { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) + f.stubSystemStore(mockedTxn.MockSystemstore.EXPECT()) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + desc := client.IndexDescription{ + Name: testUsersColIndexName, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + testCase.AlterDescription(&desc) + + _, err := f.createCollectionIndex(desc) + assert.ErrorIs(t, err, testErr, testCase.Name) + } +} + +func TestCreateIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, + } + + _, err := f.createCollectionIndexFor(productsColName, desc) + assert.ErrorIs(t, err, NewErrCanNotReadCollection(usersColName, nil)) +} + +func TestCreateIndex_IfPropertyDoesntExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + const field = "non_existing_field" + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{{Name: field}}, + } + + _, err := f.createCollectionIndex(desc) + assert.ErrorIs(t, err, NewErrNonExistingFieldForIndex(field)) +} + +func TestCreateIndex_WithMultipleCollectionsAndIndexes_AssignIncrementedIDPerCollection(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.createCollection(getUsersCollectionDesc()) + products := f.createCollection(getProductsCollectionDesc()) + + makeIndex := func(fieldName string) client.IndexDescription { + return client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: fieldName, Direction: client.Ascending}, + }, + } + } + + createIndexAndAssert := func(col client.Collection, fieldName string, expectedID uint32) { + desc, err := f.createCollectionIndexFor(col.Name(), makeIndex(fieldName)) + require.NoError(t, err) + assert.Equal(t, expectedID, desc.ID) + seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, col.ID())) + storedSeqKey, err := f.txn.Systemstore().Get(f.ctx, seqKey.ToDS()) + assert.NoError(t, err) + storedSeqVal := binary.BigEndian.Uint64(storedSeqKey) + assert.Equal(t, expectedID, uint32(storedSeqVal)) + } + + createIndexAndAssert(users, usersNameFieldName, 1) + createIndexAndAssert(users, usersAgeFieldName, 2) + createIndexAndAssert(products, productsIDFieldName, 1) + createIndexAndAssert(products, productsCategoryFieldName, 2) +} + +func TestCreateIndex_IfFailsToCreateTxn_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedRootStore := 
mocks.NewRootStore(t) + mockedRootStore.EXPECT().NewTransaction(mock.Anything, mock.Anything).Return(nil, testErr) + f.db.rootstore = mockedRootStore + + _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.ErrorIs(t, err, testErr) +} + +func TestCreateIndex_IfProvideInvalidIndexName_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexDesc := getUsersIndexDescOnName() + indexDesc.Name = "!" + _, err := f.users.CreateIndex(f.ctx, indexDesc) + require.ErrorIs(t, err, schema.NewErrIndexWithInvalidName(indexDesc.Name)) +} + +func TestCreateIndex_ShouldUpdateCollectionsDescription(t *testing.T) { + f := newIndexTestFixture(t) + + indOnName, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{indOnName}, f.users.Description().Indexes) + + indOnAge, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnAge()) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{indOnName, indOnAge}, + f.users.Description().Indexes) +} + +func TestCreateIndex_NewCollectionDescription_ShouldIncludeIndexDescription(t *testing.T) { + f := newIndexTestFixture(t) + + _, err := f.createCollectionIndex(getUsersIndexDescOnName()) + require.NoError(t, err) + + desc := getUsersIndexDescOnAge() + desc.Name = "" + _, err = f.createCollectionIndex(desc) + require.NoError(t, err) + + cols, err := f.db.getAllCollections(f.ctx, f.txn) + require.NoError(t, err) + + require.Equal(t, 1, len(cols)) + col := cols[0] + require.Equal(t, 2, len(col.Description().Indexes)) + require.NotEmpty(t, col.Description().Indexes[0].Name) + require.NotEmpty(t, col.Description().Indexes[1].Name) +} + +func TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) { + f := newIndexTestFixtureBare(t) + + const unsupportedKind = client.FieldKind_BOOL_ARRAY + + desc := client.CollectionDescription{ + Name: "testTypeCol", + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: "field", + Kind: unsupportedKind, + Typ: client.LWW_REGISTER, + }, + }, + }, + } + + collection := f.createCollection(desc) + + indexDesc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: "field", Direction: client.Ascending}, + }, + } + + _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + require.ErrorIs(f.t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) + f.commitTxn() +} + +func TestCreateIndex_IfFailedToReadIndexUponRetrievingCollectionDesc_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn().ClearSystemStore() + onSystemStore := mockedTxn.MockSystemstore.EXPECT() + + colIndexKey := core.NewCollectionIndexKey(f.users.Description().Name, "") + matchPrefixFunc := func(q query.Query) bool { + res := q.Prefix == colIndexKey.ToDS().String() + return res + } + + onSystemStore.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)).Return(nil, testErr) + + descData, err := json.Marshal(getUsersCollectionDesc()) + require.NoError(t, err) + + onSystemStore.Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithValues(t, []byte("schemaID")), nil) + onSystemStore.Get(mock.Anything, mock.Anything).Unset() + onSystemStore.Get(mock.Anything, mock.Anything).Return(descData, nil) + + f.stubSystemStore(onSystemStore) + + _, err = f.db.getAllCollections(f.ctx, f.txn) + require.ErrorIs(t, err, testErr) +} + +func TestGetIndexes_ShouldReturnListOfAllExistingIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + usersIndexDesc := client.IndexDescription{ + Name: "users_name_index", + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) + assert.NoError(t, err) + + f.createCollection(getProductsCollectionDesc()) + productsIndexDesc := client.IndexDescription{ + Name: "products_description_index", + Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, + } + _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) + assert.NoError(t, err) + + indexes, err := f.getAllIndexes() + assert.NoError(t, err) + + require.Equal(t, 2, len(indexes)) + + assert.Equal(t, 1, len(indexes[usersColName])) + assert.Equal(t, usersIndexDesc.Name, indexes[usersColName][0].Name) + assert.Equal(t, 1, len(indexes[productsColName])) + assert.Equal(t, productsIndexDesc.Name, indexes[productsColName][0].Name) +} + +func TestGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) + assert.NoError(t, err) + + _, err = f.getAllIndexes() + assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + key := ds.NewKey(indexKey.ToString() + "/invalid") + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } + descData, _ := json.Marshal(desc) + err := f.txn.Systemstore().Put(f.ctx, key, descData) + assert.NoError(t, err) + + _, err = f.getAllIndexes() + assert.ErrorIs(t, err, NewErrInvalidStoredIndexKey(key.String())) +} + +func TestGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(nil, errors.New("test error")) + + _, err := f.getAllIndexes() + assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) +} + +func TestGetIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithValues(t) + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + + _, _ = f.getAllIndexes() +} + +func TestGetIndexes_IfSystemStoreQueryIteratorFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + + _, err := f.getAllIndexes() + assert.ErrorIs(t, err, testErr) +} + +func TestGetIndexes_IfSystemStoreHasInvalidData_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithValues(t, []byte("invalid")) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + + _, err := f.getAllIndexes() + assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestGetIndexes_IfFailsToReadSeqNumber_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + StubSystemStore func(*mocks.DSReaderWriter_Expecter, core.Key) + }{ + { + Name: "Read Sequence Number", + StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { + onSystemStore.Get(mock.Anything, seqKey.ToDS()).Return(nil, testErr) + }, + }, + { + Name: "Increment Sequence Number", + StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { + onSystemStore.Put(mock.Anything, seqKey.ToDS(), mock.Anything).Return(testErr) + }, + }, + } + + for _, tc := range testCases { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + onSystemStore := mockedTxn.MockSystemstore.EXPECT() + f.resetSystemStoreStubs(onSystemStore) + + seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) + tc.StubSystemStore(onSystemStore, seqKey) + f.stubSystemStore(onSystemStore) + + _, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName()) + assert.ErrorIs(t, err, testErr) + } +} + +func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + usersIndexDesc := client.IndexDescription{ + Name: "users_name_index", + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) + assert.NoError(t, err) + + f.createCollection(getProductsCollectionDesc()) + productsIndexDesc := client.IndexDescription{ + Name: "products_description_index", + Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, + } + _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) + assert.NoError(t, err) + + userIndexes, err := f.getCollectionIndexes(usersColName) + assert.NoError(t, err) + 
require.Equal(t, 1, len(userIndexes)) + usersIndexDesc.ID = 1 + assert.Equal(t, usersIndexDesc, userIndexes[0]) + + productIndexes, err := f.getCollectionIndexes(productsColName) + assert.NoError(t, err) + require.Equal(t, 1, len(productIndexes)) + productsIndexDesc.ID = 1 + assert.Equal(t, productsIndexDesc, productIndexes[0]) +} + +func TestGetCollectionIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). + Return(nil, errors.New("test error")) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + + _, err := f.getCollectionIndexes(usersColName) + assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) +} + +func TestGetCollectionIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + query := mocks.NewQueryResultsWithValues(t) + query.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(query, nil) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + + _, _ = f.getCollectionIndexes(usersColName) +} + +func TestGetCollectionIndexes_IfSystemStoreQueryIteratorFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}), nil) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + + _, err := f.getCollectionIndexes(usersColName) + assert.ErrorIs(t, err, testErr) +} + +func TestGetCollectionIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) + assert.NoError(t, err) + + _, err = f.getCollectionIndexes(usersColName) + assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestCollectionGetIndexes_ShouldReturnIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + + require.Equal(t, 1, len(indexes)) + assert.Equal(t, testUsersColIndexName, indexes[0].Name) +} + +func TestCollectionGetIndexes_ShouldCloseQueryIterator(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(f.t) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + queryResults := mocks.NewQueryResultsWithValues(f.t) + queryResults.EXPECT().Close().Unset() + queryResults.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(queryResults, nil) + + _, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx) + assert.NoError(t, err) +} + +func TestCollectionGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + ExpectedError error + GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter + }{ + { + Name: "Query fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything).Unset() + store.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + return store + }, + }, + { + Name: "Query iterator fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}), nil) + return store + }, + }, + { + Name: "Query iterator returns invalid value", + ExpectedError: NewErrInvalidStoredIndex(nil), + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) + return store + }, + }, + } + + for _, testCase := range testCases { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + _, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx) + require.ErrorIs(t, err, testCase.ExpectedError) + } +} + +func TestCollectionGetIndexes_IfFailsToCreateTxn_ShouldNotCache(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + workingRootStore := f.db.rootstore + mockedRootStore := mocks.NewRootStore(t) + f.db.rootstore = mockedRootStore + mockedRootStore.EXPECT().NewTransaction(mock.Anything, mock.Anything).Return(nil, testErr) + + _, err := f.users.GetIndexes(f.ctx) + require.ErrorIs(t, err, testErr) + + f.db.rootstore = workingRootStore + + indexes, err := f.users.GetIndexes(f.ctx) + require.NoError(t, err) + + require.Equal(t, 1, len(indexes)) + assert.Equal(t, testUsersColIndexName, indexes[0].Name) +} + +func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *testing.T) { + f := newIndexTestFixtureBare(t) + + const unsupportedKind = client.FieldKind_BOOL_ARRAY + + desc := client.CollectionDescription{ + Name: "testTypeCol", + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: "field", + Kind: unsupportedKind, + Typ: client.LWW_REGISTER, + }, + }, + }, + } + + collection := f.createCollection(desc) + + indexDesc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: "field", Direction: client.Ascending}, + }, + } + indexDescData, err := json.Marshal(indexDesc) + require.NoError(t, err) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithValues(t, indexDescData), nil) + + _, err = collection.WithTxn(mockedTxn).GetIndexes(f.ctx) + require.ErrorIs(t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) +} + +func TestCollectionGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + require.Len(t, indexes, 2) + require.ElementsMatch(t, + []string{testUsersColIndexName, testUsersColIndexAge}, + []string{indexes[0].Name, indexes[1].Name}, + ) + require.ElementsMatch(t, []uint32{1, 2}, []uint32{indexes[0].ID, indexes[1].ID}) +} + +func TestCollectionGetIndexes_IfIndexIsCreated_ReturnUpdateIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 1) + + _, err = f.users.CreateIndex(f.ctx, getUsersIndexDescOnAge()) + assert.NoError(t, err) + + indexes, err = f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 2) +} + +func TestCollectionGetIndexes_IfIndexIsDropped_ReturnUpdateIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 2) + + err = f.users.DropIndex(f.ctx, testUsersColIndexName) + assert.NoError(t, err) + + indexes, err = f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 1) + assert.Equal(t, indexes[0].Name, testUsersColIndexAge) + + err = f.users.DropIndex(f.ctx, testUsersColIndexAge) + assert.NoError(t, err) + + indexes, err = f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 0) +} + +func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { + f := newIndexTestFixtureBare(t) + colDesc := client.CollectionDescription{ + Name: "testCollection", + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + } + const ( + num = 30 + fieldNamePrefix = "field_" + indexNamePrefix = "index_" + ) + + toSuffix := func(i int) string { + return fmt.Sprintf("%02d", i) + } + + for i := 1; i <= num; i++ { + colDesc.Schema.Fields = append(colDesc.Schema.Fields, + client.FieldDescription{ + Name: fieldNamePrefix + toSuffix(i), + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }) + } + + collection := f.createCollection(colDesc) + + for i := 1; i <= num; i++ { + iStr := toSuffix(i) + indexDesc := client.IndexDescription{ + Name: indexNamePrefix + iStr, + Fields: []client.IndexedFieldDescription{ + {Name: fieldNamePrefix + iStr, Direction: client.Ascending}, + }, + } + + _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + require.NoError(t, err) + } + f.commitTxn() + + indexes, err := collection.GetIndexes(f.ctx) + require.NoError(t, err) + require.Len(t, indexes, num) + + for i := 1; i <= num; i++ { + assert.Equal(t, indexNamePrefix+toSuffix(i), indexes[i-1].Name, "i = %d", i) + } +} + +func TestDropIndex_ShouldDeleteIndex(t *testing.T) { + f := newIndexTestFixture(t) + desc := f.createUserCollectionIndexOnName() + + err := f.dropIndex(usersColName, desc.Name) + assert.NoError(t, err) + + indexKey := core.NewCollectionIndexKey(usersColName, desc.Name) + _, err = f.txn.Systemstore().Get(f.ctx, indexKey.ToDS()) + 
assert.Error(t, err) +} + +func TestDropIndex_IfStorageFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := f.createUserCollectionIndexOnName() + + f.db.Close(f.ctx) + + err := f.dropIndex(productsColName, desc.Name) + assert.Error(t, err) +} + +func TestDropIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + err := f.dropIndex(productsColName, "any_name") + assert.ErrorIs(t, err, NewErrCanNotReadCollection(usersColName, nil)) +} + +func TestDropIndex_IfFailsToQuerySystemStorage_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + mockTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockTxn.MockSystemstore.EXPECT() + systemStoreOn.Query(mock.Anything, mock.Anything).Return(nil, testErr) + f.stubSystemStore(systemStoreOn) + + err := f.dropIndex(usersColName, desc.Name) + require.ErrorIs(t, err, testErr) +} + +func TestDropIndex_IfFailsToCreateTxn_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + mockedRootStore := mocks.NewRootStore(t) + mockedRootStore.EXPECT().NewTransaction(mock.Anything, mock.Anything).Return(nil, testErr) + f.db.rootstore = mockedRootStore + + err := f.users.DropIndex(f.ctx, testUsersColIndexName) + require.ErrorIs(t, err, testErr) +} + +func TestDropIndex_IfFailsToDeleteFromStorage_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockedTxn.MockSystemstore.EXPECT() + systemStoreOn.Delete(mock.Anything, mock.Anything).Return(testErr) + f.stubSystemStore(systemStoreOn) + mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything).Maybe(). 
+ Return(mocks.NewQueryResultsWithValues(t), nil) + + err := f.users.WithTxn(mockedTxn).DropIndex(f.ctx, testUsersColIndexName) + require.ErrorIs(t, err, testErr) +} + +func TestDropIndex_ShouldUpdateCollectionsDescription(t *testing.T) { + f := newIndexTestFixture(t) + col := f.users.WithTxn(f.txn) + _, err := col.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.NoError(t, err) + indOnAge, err := col.CreateIndex(f.ctx, getUsersIndexDescOnAge()) + require.NoError(t, err) + f.commitTxn() + + err = f.users.DropIndex(f.ctx, testUsersColIndexName) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{indOnAge}, + f.users.Description().Indexes) + + err = f.users.DropIndex(f.ctx, testUsersColIndexAge) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{}, f.users.Description().Indexes) +} + +func TestDropIndex_IfIndexWithNameDoesNotExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + const name = "not_existing_index" + err := f.users.DropIndex(f.ctx, name) + require.ErrorIs(t, err, NewErrIndexWithNameDoesNotExists(name)) +} + +func TestDropIndex_IfSystemStoreFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + err := f.users.WithTxn(mockedTxn).DropIndex(f.ctx, testUsersColIndexName) + require.ErrorIs(t, err, testErr) +} + +func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { + f := newIndexTestFixture(t) + _, err := f.createCollectionIndexFor(usersColName, client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + }) + assert.NoError(f.t, err) + + _, err = f.createCollectionIndexFor(usersColName, client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: usersAgeFieldName, Direction: client.Ascending}, + }, + }) + assert.NoError(f.t, err) + + assert.Equal(t, 2, f.countIndexPrefixes(usersColName, "")) + + err = f.users.dropAllIndexes(f.ctx, f.txn) + assert.NoError(t, err) + + assert.Equal(t, 0, f.countIndexPrefixes(usersColName, "")) +} + +func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + f.db.Close(f.ctx) + + err := f.users.dropAllIndexes(f.ctx, f.txn) + assert.Error(t, err) +} + +func TestDropAllIndexes_IfSystemStorageFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + ExpectedError error + GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter + }{ + { + Name: "Query fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything).Unset() + store.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + return store + }, + }, + { + Name: "Query iterator fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}), nil) + return store + }, + }, + { + Name: "System storage fails to delete", + ExpectedError: NewErrInvalidStoredIndex(nil), + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithValues(t, []byte{}), nil) + store.EXPECT().Delete(mock.Anything, mock.Anything).Maybe().Return(testErr) + return store + }, + }, + } + + for _, testCase := range testCases { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + err := f.users.dropAllIndexes(f.ctx, f.txn) + assert.ErrorIs(t, err, testErr, testCase.Name) + } +} + +func TestDropAllIndexes_ShouldCloseQueryIterator(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + q := mocks.NewQueryResultsWithValues(t, []byte{}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + mockedTxn.MockSystemstore.EXPECT().Delete(mock.Anything, mock.Anything).Maybe().Return(nil) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + _ = f.users.dropAllIndexes(f.ctx, f.txn) +} + +func TestNewCollectionIndex_IfDescriptionHasNoFields_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := getUsersIndexDescOnName() + desc.Fields = nil + _, err := NewCollectionIndex(f.users, desc) + require.ErrorIs(t, err, NewErrIndexDescHasNoFields(desc)) +} + +func TestNewCollectionIndex_IfDescriptionHasNonExistingField_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := getUsersIndexDescOnName() + desc.Fields[0].Name = "non_existing_field" + _, err := NewCollectionIndex(f.users, desc) + require.ErrorIs(t, err, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name)) +} diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go new file mode 100644 index 0000000000..2c89d5f472 --- /dev/null +++ b/db/indexed_docs_test.go @@ -0,0 +1,1049 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
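Reviewer note: the storage-failure tests above all follow one stubbing pattern: swap `Systemstore()` for a fresh `DSReaderWriter` mock, `Unset()` the default expectation, and re-stub only the call under test. A condensed sketch of that pattern, built from the `mocks` helpers in this diff; the test name is hypothetical:

```go
// TestExample_SystemStoreFailure is a hypothetical test illustrating the
// fixture's mock-swapping pattern used throughout db/index_test.go.
func TestExample_SystemStoreFailure(t *testing.T) {
	testErr := errors.New("test error")
	f := newIndexTestFixture(t)
	mockedTxn := f.mockTxn()

	// Swap in a fresh system-store mock and make the txn hand it out.
	mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t)
	mockedTxn.EXPECT().Systemstore().Unset()
	mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe()

	// Fail only the call under test; the fixture keeps everything else stubbed.
	mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr)

	_, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx)
	require.ErrorIs(t, err, testErr)
}
```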
+
+package db
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	ipfsDatastore "github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/query"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/datastore"
+	"github.com/sourcenetwork/defradb/datastore/mocks"
+	"github.com/sourcenetwork/defradb/db/fetcher"
+	fetcherMocks "github.com/sourcenetwork/defradb/db/fetcher/mocks"
+	"github.com/sourcenetwork/defradb/planner/mapper"
+)
+
+type userDoc struct {
+	Name string `json:"name"`
+	Age int `json:"age"`
+	Weight float64 `json:"weight"`
+}
+
+type productDoc struct {
+	ID int `json:"id"`
+	Price float64 `json:"price"`
+	Category string `json:"category"`
+	Available bool `json:"available"`
+}
+
+func (f *indexTestFixture) saveDocToCollection(doc *client.Document, col client.Collection) {
+	err := col.Create(f.ctx, doc)
+	require.NoError(f.t, err)
+	f.txn, err = f.db.NewTxn(f.ctx, false)
+	require.NoError(f.t, err)
+}
+
+func (f *indexTestFixture) newUserDoc(name string, age int) *client.Document {
+	d := userDoc{Name: name, Age: age, Weight: 154.1}
+	data, err := json.Marshal(d)
+	require.NoError(f.t, err)
+
+	doc, err := client.NewDocFromJSON(data)
+	require.NoError(f.t, err)
+	return doc
+}
+
+func (f *indexTestFixture) newProdDoc(id int, price float64, cat string) *client.Document {
+	d := productDoc{ID: id, Price: price, Category: cat}
+	data, err := json.Marshal(d)
+	require.NoError(f.t, err)
+
+	doc, err := client.NewDocFromJSON(data)
+	require.NoError(f.t, err)
+	return doc
+}
+
+// indexKeyBuilder is a helper for building index keys that can be turned into a string.
+// The format of the non-unique index key is: "/<collection_id>/<index_id>/<field_value>/<doc_key>"
+// Example: "/5/1/12/bae-61cd6879-63ca-5ca9-8731-470a3c1dac69"
+type indexKeyBuilder struct {
+	f *indexTestFixture
+	colName string
+	fieldName string
+	doc *client.Document
+	values [][]byte
+	isUnique bool
+}
+
+func newIndexKeyBuilder(f *indexTestFixture) *indexKeyBuilder {
+	return &indexKeyBuilder{f: f}
+}
+
+func (b *indexKeyBuilder) Col(colName string) *indexKeyBuilder {
+	b.colName = colName
+	return b
+}
+
+// Field sets the field name for the index key.
+// If the field name is not set, the index key will contain only the collection id.
+// When building a key, it will find the field id to use in the key.
+func (b *indexKeyBuilder) Field(fieldName string) *indexKeyBuilder {
+	b.fieldName = fieldName
+	return b
+}
+
+// Doc sets the document for the index key.
+// For non-unique index keys, it will try to find the field value in the document
+// corresponding to the field name set in the builder.
+// As the last value in the index key, it will use the document id.
+func (b *indexKeyBuilder) Doc(doc *client.Document) *indexKeyBuilder {
+	b.doc = doc
+	return b
+}
+
+// Values sets the values for the index key.
+// It will override the field values stored in the document.
+func (b *indexKeyBuilder) Values(values ...[]byte) *indexKeyBuilder { + b.values = values + return b +} + +func (b *indexKeyBuilder) Unique() *indexKeyBuilder { + b.isUnique = true + return b +} + +func (b *indexKeyBuilder) Build() core.IndexDataStoreKey { + key := core.IndexDataStoreKey{} + + if b.colName == "" { + return key + } + + cols, err := b.f.db.getAllCollections(b.f.ctx, b.f.txn) + require.NoError(b.f.t, err) + var collection client.Collection + for _, col := range cols { + if col.Name() == b.colName { + collection = col + break + } + } + if collection == nil { + panic(errors.New("collection not found")) + } + key.CollectionID = collection.ID() + + if b.fieldName == "" { + return key + } + + indexes, err := collection.GetIndexes(b.f.ctx) + require.NoError(b.f.t, err) + for _, index := range indexes { + if index.Fields[0].Name == b.fieldName { + key.IndexID = index.ID + break + } + } + + if b.doc != nil { + var fieldBytesVal []byte + var writeableVal client.WriteableValue + if len(b.values) == 0 { + fieldVal, err := b.doc.GetValue(b.fieldName) + require.NoError(b.f.t, err) + var ok bool + writeableVal, ok = fieldVal.(client.WriteableValue) + require.True(b.f.t, ok) + } else { + writeableVal = client.NewCBORValue(client.LWW_REGISTER, b.values[0]) + } + fieldBytesVal, err = writeableVal.Bytes() + require.NoError(b.f.t, err) + + key.FieldValues = [][]byte{fieldBytesVal, []byte(b.doc.Key().String())} + } else if len(b.values) > 0 { + key.FieldValues = b.values + } + + return key +} + +func (f *indexTestFixture) getPrefixFromDataStore(prefix string) [][]byte { + q := query.Query{Prefix: prefix} + res, err := f.txn.Datastore().Query(f.ctx, q) + require.NoError(f.t, err) + + var keys [][]byte + for r := range res.Next() { + keys = append(keys, r.Entry.Value) + } + return keys +} + +func (f *indexTestFixture) mockTxn() *mocks.MultiStoreTxn { + mockedTxn := mocks.NewTxnWithMultistore(f.t) + + systemStoreOn := mockedTxn.MockSystemstore.EXPECT() + f.resetSystemStoreStubs(systemStoreOn) + f.stubSystemStore(systemStoreOn) + + f.txn = mockedTxn + return mockedTxn +} + +func (*indexTestFixture) resetSystemStoreStubs(systemStoreOn *mocks.DSReaderWriter_Expecter) { + systemStoreOn.Query(mock.Anything, mock.Anything).Unset() + systemStoreOn.Get(mock.Anything, mock.Anything).Unset() + systemStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Unset() +} + +func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_Expecter) { + desc := getUsersIndexDescOnName() + desc.ID = 1 + indexOnNameDescData, err := json.Marshal(desc) + require.NoError(f.t, err) + + colIndexKey := core.NewCollectionIndexKey(usersColName, "") + matchPrefixFunc := func(q query.Query) bool { + return q.Prefix == colIndexKey.ToDS().String() + } + + systemStoreOn.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)). + RunAndReturn(func(context.Context, query.Query) (query.Results, error) { + return mocks.NewQueryResultsWithValues(f.t, indexOnNameDescData), nil + }).Maybe() + systemStoreOn.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)).Maybe(). + Return(mocks.NewQueryResultsWithValues(f.t, indexOnNameDescData), nil) + systemStoreOn.Query(mock.Anything, mock.Anything).Maybe(). 
+ Return(mocks.NewQueryResultsWithValues(f.t), nil) + + colKey := core.NewCollectionKey(usersColName) + systemStoreOn.Get(mock.Anything, colKey.ToDS()).Maybe().Return([]byte(userColVersionID), nil) + + colVersionIDKey := core.NewCollectionSchemaVersionKey(userColVersionID) + colDesc := getUsersCollectionDesc() + colDesc.ID = 1 + for i := range colDesc.Schema.Fields { + colDesc.Schema.Fields[i].ID = client.FieldID(i) + } + colDescBytes, err := json.Marshal(colDesc) + require.NoError(f.t, err) + systemStoreOn.Get(mock.Anything, colVersionIDKey.ToDS()).Maybe().Return(colDescBytes, nil) + + colIndexOnNameKey := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) + systemStoreOn.Get(mock.Anything, colIndexOnNameKey.ToDS()).Maybe().Return(indexOnNameDescData, nil) + + if f.users != nil { + sequenceKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) + systemStoreOn.Get(mock.Anything, sequenceKey.ToDS()).Maybe().Return([]byte{0, 0, 0, 0, 0, 0, 0, 1}, nil) + } + + systemStoreOn.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + + systemStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Maybe().Return(nil) + + systemStoreOn.Has(mock.Anything, mock.Anything).Maybe().Return(false, nil) + + systemStoreOn.Delete(mock.Anything, mock.Anything).Maybe().Return(nil) +} + +func TestNonUnique_IfDocIsAdded_ShouldBeIndexed(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfFailsToStoredIndexedDoc_Error(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + mockTxn := f.mockTxn() + + dataStoreOn := mockTxn.MockDatastore.EXPECT() + dataStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Unset() + dataStoreOn.Put(mock.Anything, key.ToDS(), mock.Anything).Return(errors.New("error")) + dataStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) + require.ErrorIs(f.t, err, NewErrFailedToStoreIndexedField("name", nil)) +} + +func TestNonUnique_IfDocDoesNotHaveIndexedField_SkipIndex(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + data, err := json.Marshal(struct { + Age int `json:"age"` + Weight float64 `json:"weight"` + }{Age: 21, Weight: 154.1}) + require.NoError(f.t, err) + + doc, err := client.NewDocFromJSON(data) + require.NoError(f.t, err) + + err = f.users.Create(f.ctx, doc) + require.NoError(f.t, err) + + key := newIndexKeyBuilder(f).Col(usersColName).Build() + prefixes := f.getPrefixFromDataStore(key.ToString()) + assert.Len(t, prefixes, 0) +} + +func TestNonUnique_IfSystemStorageHasInvalidIndexDescription_Error(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + + mockTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockTxn.MockSystemstore.EXPECT() + systemStoreOn.Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) + + err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) + require.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestNonUnique_IfSystemStorageFailsToReadIndexDesc_Error(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + + testErr := errors.New("test error") + + mockTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockTxn.MockSystemstore.EXPECT() + systemStoreOn.Query(mock.Anything, mock.Anything). + Return(nil, testErr) + + err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) + require.ErrorIs(t, err, testErr) +} + +func TestNonUnique_IfIndexIntField_StoreIt(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnAge() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfMultipleCollectionsWithIndexes_StoreIndexWithCollectionID(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.createCollection(getUsersCollectionDesc()) + products := f.createCollection(getProductsCollectionDesc()) + + _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) + require.NoError(f.t, err) + f.commitTxn() + + userDoc := f.newUserDoc("John", 21) + prodDoc := f.newProdDoc(1, 3, "games") + + err = users.Create(f.ctx, userDoc) + require.NoError(f.t, err) + err = products.Create(f.ctx, prodDoc) + require.NoError(f.t, err) + f.commitTxn() + + userDocKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(userDoc).Build() + prodDocKey := newIndexKeyBuilder(f).Col(productsColName).Field(productsCategoryFieldName).Doc(prodDoc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, userDocKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, prodDocKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfMultipleIndexes_StoreIndexWithIndexID(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + nameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + ageKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, nameKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, ageKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_StoringIndexedFieldValueOfDifferentTypes(t *testing.T) { + f := newIndexTestFixtureBare(t) + + now := time.Now() + nowStr := now.Format(time.RFC3339) + + testCase := []struct { + Name string + FieldKind client.FieldKind + // FieldVal is the value the index will receive for serialization + FieldVal any + ShouldFail bool + }{ + {Name: "invalid int", FieldKind: client.FieldKind_INT, FieldVal: "invalid", ShouldFail: true}, + {Name: "invalid float", FieldKind: client.FieldKind_FLOAT, FieldVal: "invalid", ShouldFail: true}, + {Name: "invalid bool", FieldKind: client.FieldKind_BOOL, FieldVal: 
"invalid", ShouldFail: true}, + {Name: "invalid datetime", FieldKind: client.FieldKind_DATETIME, FieldVal: nowStr[1:], ShouldFail: true}, + {Name: "invalid datetime type", FieldKind: client.FieldKind_DATETIME, FieldVal: 1, ShouldFail: true}, + + {Name: "valid int", FieldKind: client.FieldKind_INT, FieldVal: 12}, + {Name: "valid float", FieldKind: client.FieldKind_FLOAT, FieldVal: 36.654}, + {Name: "valid bool true", FieldKind: client.FieldKind_BOOL, FieldVal: true}, + {Name: "valid bool false", FieldKind: client.FieldKind_BOOL, FieldVal: false}, + {Name: "valid datetime string", FieldKind: client.FieldKind_DATETIME, FieldVal: nowStr}, + {Name: "valid empty string", FieldKind: client.FieldKind_STRING, FieldVal: ""}, + } + + for i, tc := range testCase { + desc := client.CollectionDescription{ + Name: "testTypeCol" + strconv.Itoa(i), + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: "field", + Kind: tc.FieldKind, + Typ: client.LWW_REGISTER, + }, + }, + }, + } + + collection := f.createCollection(desc) + + indexDesc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: "field", Direction: client.Ascending}, + }, + } + + _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + require.NoError(f.t, err) + f.commitTxn() + + d := struct { + Field any `json:"field"` + }{Field: tc.FieldVal} + data, err := json.Marshal(d) + require.NoError(f.t, err) + doc, err := client.NewDocFromJSON(data) + require.NoError(f.t, err) + + err = collection.Create(f.ctx, doc) + f.commitTxn() + if tc.ShouldFail { + require.ErrorIs(f.t, err, + NewErrInvalidFieldValue(tc.FieldKind, tc.FieldVal), "test case: %s", tc.Name) + } else { + assertMsg := fmt.Sprintf("test case: %s", tc.Name) + require.NoError(f.t, err, assertMsg) + + keyBuilder := newIndexKeyBuilder(f).Col(collection.Name()).Field("field").Doc(doc) + key := keyBuilder.Build() + + keyStr := key.ToDS() + data, err := f.txn.Datastore().Get(f.ctx, keyStr) + require.NoError(t, err, assertMsg) + assert.Len(t, data, 0, assertMsg) + } + } +} + +func TestNonUnique_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + docJSON, err := json.Marshal(struct { + Age int `json:"age"` + }{Age: 44}) + require.NoError(f.t, err) + + doc, err := client.NewDocFromJSON(docJSON) + require.NoError(f.t, err) + + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc). 
+ Values([]byte(nil)).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUniqueCreate_ShouldIndexExistingDocs(t *testing.T) { + f := newIndexTestFixture(t) + + doc1 := f.newUserDoc("John", 21) + f.saveDocToCollection(doc1, f.users) + doc2 := f.newUserDoc("Islam", 18) + f.saveDocToCollection(doc2, f.users) + + f.createUserCollectionIndexOnName() + + key1 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc1).Build() + key2 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc2).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key1.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, key2.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUniqueCreate_IfUponIndexingExistingDocsFetcherFails_ReturnError(t *testing.T) { + testError := errors.New("test error") + + cases := []struct { + Name string + PrepareFetcher func() fetcher.Fetcher + }{ + { + Name: "Fails to init", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset() + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to start", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Start(mock.Anything, mock.Anything).Unset() + f.EXPECT().Start(mock.Anything, mock.Anything).Return(testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to fetch next decoded", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().FetchNextDecoded(mock.Anything).Unset() + f.EXPECT().FetchNextDecoded(mock.Anything).Return(nil, fetcher.ExecInfo{}, testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to close", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().FetchNextDecoded(mock.Anything).Unset() + f.EXPECT().FetchNextDecoded(mock.Anything).Return(nil, fetcher.ExecInfo{}, nil) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(testError) + return f + }, + }, + } + + for _, tc := range cases { + f := newIndexTestFixture(t) + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + f.users.fetcherFactory = tc.PrepareFetcher + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.ErrorIs(t, err, testError, tc.Name) + + _, err = f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.Error(t, err, tc.Name) + } +} + +func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + fieldKeyString := core.DataStoreKey{ + CollectionID: f.users.desc.IDString(), + }.WithDocKey(doc.Key().String()). + WithFieldId("1"). + WithValueFlag(). 
+ ToString() + + invalidKeyString := fieldKeyString + "/doesn't matter/" + + // Insert an invalid key within the document prefix, this will generate an error within the fetcher. + f.users.db.multistore.Datastore().Put(f.ctx, ipfsDatastore.NewKey(invalidKeyString), []byte("doesn't matter")) + + _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.ErrorIs(f.t, err, core.ErrInvalidKey) +} + +func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.createCollection(getUsersCollectionDesc()) + _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnAge()) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnWeight()) + require.NoError(f.t, err) + f.commitTxn() + + f.saveDocToCollection(f.newUserDoc("John", 21), users) + f.saveDocToCollection(f.newUserDoc("Islam", 23), users) + + products := f.createCollection(getProductsCollectionDesc()) + _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) + require.NoError(f.t, err) + f.commitTxn() + + f.saveDocToCollection(f.newProdDoc(1, 55, "games"), products) + + userNameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Build() + userAgeKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Build() + userWeightKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersWeightFieldName).Build() + prodCatKey := newIndexKeyBuilder(f).Col(productsColName).Field(productsCategoryFieldName).Build() + + err = f.dropIndex(usersColName, testUsersColIndexAge) + require.NoError(f.t, err) + + assert.Len(t, f.getPrefixFromDataStore(userNameKey.ToString()), 2) + assert.Len(t, f.getPrefixFromDataStore(userAgeKey.ToString()), 0) + assert.Len(t, f.getPrefixFromDataStore(userWeightKey.ToString()), 2) + assert.Len(t, f.getPrefixFromDataStore(prodCatKey.ToString()), 1) +} + +func TestNonUniqueDrop_IfDataStorageFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + description string + prepareSystemStorage func(*mocks.DSReaderWriter_Expecter) + }{ + { + description: "Fails to query data storage", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + mockedDS.Query(mock.Anything, mock.Anything).Unset() + mockedDS.Query(mock.Anything, mock.Anything).Return(nil, testErr) + }, + }, + { + description: "Fails to iterate data storage", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + mockedDS.Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}) + mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + }, + }, + { + description: "Fails to delete from data storage", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + q := mocks.NewQueryResultsWithResults(t, query.Result{Entry: query.Entry{Key: ""}}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) + mockedDS.Delete(mock.Anything, mock.Anything).Unset() + mockedDS.Delete(mock.Anything, mock.Anything).Return(testErr) + }, + }, + { + description: "Fails to close data storage query iterator", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + q := mocks.NewQueryResultsWithResults(t, 
query.Result{Entry: query.Entry{Key: ""}}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(testErr) + mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) + }, + }, + } + + for _, tc := range testCases { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + mockedTxn.MockDatastore = mocks.NewDSReaderWriter(t) + tc.prepareSystemStorage(mockedTxn.MockDatastore.EXPECT()) + mockedTxn.EXPECT().Datastore().Unset() + mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore) + + err := f.dropIndex(usersColName, testUsersColIndexName) + require.ErrorIs(t, err, testErr, tc.description) + } +} + +func TestNonUniqueDrop_ShouldCloseQueryIterator(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockDatastore = mocks.NewDSReaderWriter(f.t) + mockedTxn.EXPECT().Datastore().Unset() + mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe() + queryResults := mocks.NewQueryResultsWithValues(f.t) + queryResults.EXPECT().Close().Unset() + queryResults.EXPECT().Close().Return(nil) + mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything). + Return(queryResults, nil) + + err := f.dropIndex(usersColName, testUsersColIndexName) + assert.NoError(t, err) +} + +func TestNonUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + cases := []struct { + Name string + NewValue string + Exec func(doc *client.Document) error + }{ + { + Name: "update", + NewValue: "Islam", + Exec: func(doc *client.Document) error { + return f.users.Update(f.ctx, doc) + }, + }, + { + Name: "save", + NewValue: "Andy", + Exec: func(doc *client.Document) error { + return f.users.Save(f.ctx, doc) + }, + }, + } + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + for _, tc := range cases { + oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + err := doc.Set(usersNameFieldName, tc.NewValue) + require.NoError(t, err) + err = tc.Exec(doc) + require.NoError(t, err) + f.commitTxn() + + newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.Error(t, err) + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.NoError(t, err) + } +} + +func TestNonUniqueUpdate_IfFailsToReadIndexDescription_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err) + + // retrieve the collection without index cached + usersCol, err := f.db.getCollectionByName(f.ctx, f.txn, usersColName) + require.NoError(t, err) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + mockedTxn.MockDatastore.EXPECT().Get(mock.Anything, mock.Anything).Unset() + mockedTxn.MockDatastore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, nil) + + usersCol.(*collection).fetcherFactory = func() fetcher.Fetcher { + return fetcherMocks.NewStubbedFetcher(t) + } + err = 
usersCol.WithTxn(mockedTxn).Update(f.ctx, doc)
+	require.ErrorIs(t, err, testErr)
+}
+
+func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) {
+	testError := errors.New("test error")
+
+	cases := []struct {
+		Name string
+		PrepareFetcher func() fetcher.Fetcher
+	}{
+		{
+			Name: "Fails to init",
+			PrepareFetcher: func() fetcher.Fetcher {
+				f := fetcherMocks.NewStubbedFetcher(t)
+				f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset()
+				f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testError)
+				f.EXPECT().Close().Unset()
+				f.EXPECT().Close().Return(nil)
+				return f
+			},
+		},
+		{
+			Name: "Fails to start",
+			PrepareFetcher: func() fetcher.Fetcher {
+				f := fetcherMocks.NewStubbedFetcher(t)
+				f.EXPECT().Start(mock.Anything, mock.Anything).Unset()
+				f.EXPECT().Start(mock.Anything, mock.Anything).Return(testError)
+				f.EXPECT().Close().Unset()
+				f.EXPECT().Close().Return(nil)
+				return f
+			},
+		},
+		{
+			Name: "Fails to fetch next decoded",
+			PrepareFetcher: func() fetcher.Fetcher {
+				f := fetcherMocks.NewStubbedFetcher(t)
+				f.EXPECT().FetchNextDecoded(mock.Anything).Unset()
+				f.EXPECT().FetchNextDecoded(mock.Anything).Return(nil, fetcher.ExecInfo{}, testError)
+				f.EXPECT().Close().Unset()
+				f.EXPECT().Close().Return(nil)
+				return f
+			},
+		},
+		{
+			Name: "Fails to close",
+			PrepareFetcher: func() fetcher.Fetcher {
+				f := fetcherMocks.NewStubbedFetcher(t)
+				f.EXPECT().FetchNextDecoded(mock.Anything).Unset()
+				// By default the stubbed fetcher returns an empty, invalid document;
+				// here we need to make sure it reaches the Close call by overriding that default.
+				f.EXPECT().FetchNextDecoded(mock.Anything).Maybe().Return(nil, fetcher.ExecInfo{}, nil)
+				f.EXPECT().Close().Unset()
+				f.EXPECT().Close().Return(testError)
+				return f
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Log(tc.Name)
+
+		f := newIndexTestFixture(t)
+		f.createUserCollectionIndexOnName()
+
+		doc := f.newUserDoc("John", 21)
+		f.saveDocToCollection(doc, f.users)
+
+		f.users.fetcherFactory = tc.PrepareFetcher
+		oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build()
+
+		err := doc.Set(usersNameFieldName, "Islam")
+		require.NoError(t, err, tc.Name)
+		err = f.users.Update(f.ctx, doc)
+		require.Error(t, err, tc.Name)
+
+		newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build()
+
+		_, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS())
+		require.NoError(t, err, tc.Name)
+		_, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS())
+		require.Error(t, err, tc.Name)
+	}
+}
+
+func TestNonUniqueUpdate_IfFailsToUpdateIndex_ReturnError(t *testing.T) {
+	f := newIndexTestFixture(t)
+	f.createUserCollectionIndexOnAge()
+
+	doc := f.newUserDoc("John", 21)
+	f.saveDocToCollection(doc, f.users)
+	f.commitTxn()
+
+	validKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build()
+	invalidKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).
+ Values([]byte("invalid")).Build() + + err := f.txn.Datastore().Delete(f.ctx, validKey.ToDS()) + require.NoError(f.t, err) + err = f.txn.Datastore().Put(f.ctx, invalidKey.ToDS(), []byte{}) + require.NoError(f.t, err) + f.commitTxn() + + err = doc.Set(usersAgeFieldName, 23) + require.NoError(t, err) + err = f.users.Update(f.ctx, doc) + require.Error(t, err) +} + +func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + f.users.fetcherFactory = func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset() + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + RunAndReturn(func( + ctx context.Context, + txn datastore.Txn, + col *client.CollectionDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + mapping *core.DocumentMapping, + reverse, showDeleted bool, + ) error { + require.Equal(t, 2, len(fields)) + require.ElementsMatch(t, + []string{usersNameFieldName, usersAgeFieldName}, + []string{fields[0].Name, fields[1].Name}) + return errors.New("early exit") + }) + return f + } + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err) + _ = f.users.Update(f.ctx, doc) +} + +func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) { + testErr := errors.New("error") + + cases := []struct { + Name string + StubDataStore func(*mocks.DSReaderWriter_Expecter) + }{ + { + Name: "Delete old value", + StubDataStore: func(ds *mocks.DSReaderWriter_Expecter) { + ds.Delete(mock.Anything, mock.Anything).Return(testErr) + ds.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + }, + }, + { + Name: "Store new value", + StubDataStore: func(ds *mocks.DSReaderWriter_Expecter) { + ds.Delete(mock.Anything, mock.Anything).Maybe().Return(nil) + ds.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + ds.Put(mock.Anything, mock.Anything, mock.Anything).Maybe().Return(testErr) + }, + }, + } + + for _, tc := range cases { + t.Log(tc.Name) + + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err) + + // This is only required as we are using it as a return value + // in production this value will have been set by the fetcher + doc.SchemaVersionID = f.users.Schema().VersionID + + f.users.fetcherFactory = func() fetcher.Fetcher { + df := fetcherMocks.NewStubbedFetcher(t) + df.EXPECT().FetchNextDecoded(mock.Anything).Unset() + df.EXPECT().FetchNextDecoded(mock.Anything).Return(doc, fetcher.ExecInfo{}, nil) + return df + } + + mockedTxn := f.mockTxn() + mockedTxn.MockDatastore = mocks.NewDSReaderWriter(f.t) + tc.StubDataStore(mockedTxn.MockDatastore.EXPECT()) + mockedTxn.EXPECT().Datastore().Unset() + mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe() + + err = f.users.WithTxn(mockedTxn).Update(f.ctx, doc) + require.ErrorIs(t, err, testErr) + } +} + +func TestNonUpdate_IfIndexedFieldWasNil_ShouldDeleteIt(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + docJSON, err := json.Marshal(struct { + Age int `json:"age"` + }{Age: 44}) + require.NoError(f.t, 
err) + + doc, err := client.NewDocFromJSON(docJSON) + require.NoError(f.t, err) + + f.saveDocToCollection(doc, f.users) + + oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc). + Values([]byte(nil)).Build() + + err = doc.Set(usersNameFieldName, "John") + require.NoError(f.t, err) + + err = f.users.Update(f.ctx, doc) + require.NoError(f.t, err) + f.commitTxn() + + newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.NoError(t, err) + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.Error(t, err) +} diff --git a/db/subscriptions.go b/db/subscriptions.go index 3243d0e779..af981ad95f 100644 --- a/db/subscriptions.go +++ b/db/subscriptions.go @@ -15,6 +15,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/planner" ) @@ -58,25 +59,37 @@ func (db *db) handleSubscription( continue } - p := planner.New(ctx, db.WithTxn(txn), txn) + db.handleEvent(ctx, txn, pub, evt, r) - s := r.ToSelect(evt.DocKey, evt.Cid.String()) + txn.Discard(ctx) + } +} - result, err := p.RunSubscriptionRequest(ctx, s) - if err != nil { - pub.Publish(client.GQLResult{ - Errors: []error{err}, - }) - continue - } +func (db *db) handleEvent( + ctx context.Context, + txn datastore.Txn, + pub *events.Publisher[events.Update], + evt events.Update, + r *request.ObjectSubscription, +) { + p := planner.New(ctx, db.WithTxn(txn), txn) - // Don't send anything back to the client if the request yields an empty dataset. - if len(result) == 0 { - continue - } + s := r.ToSelect(evt.DocKey, evt.Cid.String()) + result, err := p.RunSubscriptionRequest(ctx, s) + if err != nil { pub.Publish(client.GQLResult{ - Data: result, + Errors: []error{err}, }) + return } + + // Don't send anything back to the client if the request yields an empty dataset. + if len(result) == 0 { + return + } + + pub.Publish(client.GQLResult{ + Data: result, + }) } diff --git a/db/txn_db.go b/db/txn_db.go index 71e204c356..a7096a46a7 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -186,6 +186,26 @@ func (db *explicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collec return db.getAllCollections(ctx, db.txn) } +// GetAllIndexes gets all the indexes in the database. +func (db *implicitTxnDB) GetAllIndexes( + ctx context.Context, +) (map[client.CollectionName][]client.IndexDescription, error) { + txn, err := db.NewTxn(ctx, true) + if err != nil { + return nil, err + } + defer txn.Discard(ctx) + + return db.getAllIndexes(ctx, txn) +} + +// GetAllIndexes gets all the indexes in the database. +func (db *explicitTxnDB) GetAllIndexes( + ctx context.Context, +) (map[client.CollectionName][]client.IndexDescription, error) { + return db.getAllIndexes(ctx, db.txn) +} + // AddSchema takes the provided GQL schema in SDL format, and applies it to the database, // creating the necessary collections, request types, etc. 
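The txn_db.go additions above (the `GetAllIndexes` wrappers, and the `SetMigration`, `BasicImport`, and `BasicExport` wrappers that follow) all repeat one pattern: the implicit-transaction variant opens its own transaction, defers `Discard`, runs the shared helper, and commits, while the explicit variant simply forwards the transaction it already holds. A minimal sketch of that pattern, with hypothetical names standing in for the DefraDB internals:

```go
package example

import "context"

// Txn is a stand-in for DefraDB's datastore.Txn (assumed shape; only the
// two lifecycle methods used by the pattern are modelled).
type Txn interface {
	Commit(ctx context.Context) error
	Discard(ctx context.Context)
}

// db mirrors the implicit-transaction wrapper: it owns the whole
// transaction lifecycle around a shared helper.
type db struct {
	newTxn func(ctx context.Context, readOnly bool) (Txn, error)
	doWork func(ctx context.Context, txn Txn) error // shared helper
}

func (d *db) Op(ctx context.Context) error {
	txn, err := d.newTxn(ctx, false)
	if err != nil {
		return err
	}
	// Deferring Discard gives rollback-on-error for free; discarding an
	// already-committed transaction is typically a no-op.
	defer txn.Discard(ctx)

	if err := d.doWork(ctx, txn); err != nil {
		return err
	}
	return txn.Commit(ctx)
}

// explicitTxnDB mirrors the explicit variant: the caller owns the
// transaction, so the method only forwards it to the shared helper.
type explicitTxnDB struct {
	*db
	txn Txn
}

func (d *explicitTxnDB) Op(ctx context.Context) error {
	return d.doWork(ctx, d.txn)
}
```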
// @@ -259,6 +279,25 @@ func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string) er return db.patchSchema(ctx, db.txn, patchString) } +func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.lensRegistry.SetMigration(ctx, txn, cfg) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { + return db.lensRegistry.SetMigration(ctx, db.txn, cfg) +} + // SetReplicator adds a new replicator to the database. func (db *implicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error { txn, err := db.NewTxn(ctx, false) @@ -334,3 +373,47 @@ func (db *implicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, er func (db *explicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) { return db.getAllP2PCollections(ctx, db.txn) } + +// BasicImport imports a json dataset. +// filepath must be accessible to the node. +func (db *implicitTxnDB) BasicImport(ctx context.Context, filepath string) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.basicImport(ctx, txn, filepath) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +// BasicImport imports a json dataset. +// filepath must be accessible to the node. +func (db *explicitTxnDB) BasicImport(ctx context.Context, filepath string) error { + return db.basicImport(ctx, db.txn, filepath) +} + +// BasicExport exports the current data or subset of data to file in json format. +func (db *implicitTxnDB) BasicExport(ctx context.Context, config *client.BackupConfig) error { + txn, err := db.NewTxn(ctx, true) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.basicExport(ctx, txn, config) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +// BasicExport exports the current data or subset of data to file in json format. +func (db *explicitTxnDB) BasicExport(ctx context.Context, config *client.BackupConfig) error { + return db.basicExport(ctx, db.txn, config) +} diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md index 9a90455e3c..459f43075d 100644 --- a/docs/cli/defradb.md +++ b/docs/cli/defradb.md @@ -6,10 +6,7 @@ DefraDB Edge Database DefraDB is the edge database to power the user-centric future. -Start a database node, issue a request to a local or remote node, and much more. - -DefraDB is released under the BSL license, (c) 2022 Democratized Data Foundation. -See https://docs.source.network/BSL.txt for more information. +Start a DefraDB node, interact with a local or remote node, and much more. ### Options @@ -28,7 +25,7 @@ See https://docs.source.network/BSL.txt for more information. 
### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node * [defradb init](defradb_init.md) - Initialize DefraDB's root directory and configuration file * [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database * [defradb start](defradb_start.md) - Start a DefraDB node diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index 13e87bf46c..7173befb6b 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -1,11 +1,11 @@ ## defradb client -Interact with a running DefraDB node as a client +Interact with a DefraDB node ### Synopsis -Interact with a running DefraDB node as a client. -Execute queries, add schema types, and run debug routines. +Interact with a DefraDB node. +Execute queries, add schema types, obtain node info, etc. ### Options @@ -29,11 +29,13 @@ Execute queries, add schema types, and run debug routines. ### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database +* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility * [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore -* [defradb client dump](defradb_client_dump.md) - Dump the contents of a database node-side -* [defradb client peerid](defradb_client_peerid.md) - Get the PeerID of the DefraDB node -* [defradb client ping](defradb_client_ping.md) - Ping to test connection to a node +* [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side +* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance +* [defradb client peerid](defradb_client_peerid.md) - Get the PeerID of the node +* [defradb client ping](defradb_client_ping.md) - Ping to test connection with a node * [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md new file mode 100644 index 0000000000..baa08725e1 --- /dev/null +++ b/docs/cli/defradb_client_backup.md @@ -0,0 +1,34 @@ +## defradb client backup + +Interact with the backup utility + +### Synopsis + +Export to or Import from a backup file. +Currently only supports JSON format. + +### Options + +``` + -h, --help help for backup +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info")
+      --lognocolor           Disable colored log output
+      --logoutput string     Log output path (default "stderr")
+      --logtrace             Include stacktrace in error and fatal logs
+      --rootdir string       Directory for data and configuration to use (default: $HOME/.defradb)
+      --url string           URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client](defradb_client.md) - Interact with a DefraDB node
+* [defradb client backup export](defradb_client_backup_export.md) - Export the database to a file
+* [defradb client backup import](defradb_client_backup_import.md) - Import a JSON data file to the database
+
diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md
new file mode 100644
index 0000000000..ea8a22d634
--- /dev/null
+++ b/docs/cli/defradb_client_backup_export.md
@@ -0,0 +1,46 @@
+## defradb client backup export
+
+Export the database to a file
+
+### Synopsis
+
+Export the database to a file. If a file exists at the location, it will be overwritten.
+
+If the --collections flag is provided, only the data for those collections will be exported.
+Otherwise, all collections in the database will be exported.
+
+If the --pretty flag is provided, the JSON will be pretty printed.
+
+Example: export data for the 'Users' collection:
+  defradb client backup export --collections Users user_data.json
+
+```
+defradb client backup export [-c --collections | -p --pretty | -f --format] <output_path> [flags]
+```
+
+### Options
+
+```
+  -c, --collections strings   List of collections
+  -f, --format string         Define the output format. Supported formats: [json] (default "json")
+  -h, --help                  help for export
+  -p, --pretty                Set the output JSON to be pretty printed
+```
+
+### Options inherited from parent commands
+
+```
+      --logformat string     Log format to use. Options are csv, json (default "csv")
+      --logger stringArray   Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,...
+      --loglevel string      Log level to use. Options are debug, info, error, fatal (default "info")
+      --lognocolor           Disable colored log output
+      --logoutput string     Log output path (default "stderr")
+      --logtrace             Include stacktrace in error and fatal logs
+      --rootdir string       Directory for data and configuration to use (default: $HOME/.defradb)
+      --url string           URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility
+
diff --git a/docs/cli/defradb_client_backup_import.md b/docs/cli/defradb_client_backup_import.md
new file mode 100644
index 0000000000..c539a4d77a
--- /dev/null
+++ b/docs/cli/defradb_client_backup_import.md
@@ -0,0 +1,38 @@
+## defradb client backup import
+
+Import a JSON data file to the database
+
+### Synopsis
+
+Import a JSON data file to the database.
+
+Example: import data to the database:
+  defradb client backup import user_data.json
+
+```
+defradb client backup import <input_path> [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for import
+```
+
+### Options inherited from parent commands
+
+```
+      --logformat string     Log format to use. Options are csv, json (default "csv")
+      --logger stringArray   Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,...
+      --loglevel string      Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility + diff --git a/docs/cli/defradb_client_blocks.md b/docs/cli/defradb_client_blocks.md index 2824b677ac..e05a853440 100644 --- a/docs/cli/defradb_client_blocks.md +++ b/docs/cli/defradb_client_blocks.md @@ -23,6 +23,6 @@ Interact with the database's blockstore ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore. +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore diff --git a/docs/cli/defradb_client_blocks_get.md b/docs/cli/defradb_client_blocks_get.md index 3c007f2f30..38ff02b63c 100644 --- a/docs/cli/defradb_client_blocks_get.md +++ b/docs/cli/defradb_client_blocks_get.md @@ -1,6 +1,6 @@ ## defradb client blocks get -Get a block by its CID from the blockstore. +Get a block by its CID from the blockstore ``` defradb client blocks get [CID] [flags] diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md index 1e4404e2fc..862154bc17 100644 --- a/docs/cli/defradb_client_dump.md +++ b/docs/cli/defradb_client_dump.md @@ -1,6 +1,6 @@ ## defradb client dump -Dump the contents of a database node-side +Dump the contents of DefraDB node-side ``` defradb client dump [flags] @@ -27,5 +27,5 @@ defradb client dump [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md new file mode 100644 index 0000000000..4babb57d46 --- /dev/null +++ b/docs/cli/defradb_client_index.md @@ -0,0 +1,34 @@ +## defradb client index + +Manage collections' indexes of a running DefraDB instance + +### Synopsis + +Manage (create, drop, or list) collection indexes on a DefraDB node. + +### Options + +``` + -h, --help help for index +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info")
+      --lognocolor           Disable colored log output
+      --logoutput string     Log output path (default "stderr")
+      --logtrace             Include stacktrace in error and fatal logs
+      --rootdir string       Directory for data and configuration to use (default: $HOME/.defradb)
+      --url string           URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client](defradb_client.md) - Interact with a DefraDB node
+* [defradb client index create](defradb_client_index_create.md) - Creates a secondary index on a collection's field(s)
+* [defradb client index drop](defradb_client_index_drop.md) - Drop a collection's secondary index
+* [defradb client index list](defradb_client_index_list.md) - Shows the list of indexes in the database or for a specific collection
+
diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md
new file mode 100644
index 0000000000..7f67e58075
--- /dev/null
+++ b/docs/cli/defradb_client_index_create.md
@@ -0,0 +1,46 @@
+## defradb client index create
+
+Creates a secondary index on a collection's field(s)
+
+### Synopsis
+
+Creates a secondary index on a collection's field(s).
+
+The --name flag is optional. If not provided, a name will be generated automatically.
+
+Example: create an index for the 'Users' collection on the 'name' field:
+  defradb client index create --collection Users --fields name
+
+Example: create a named index for the 'Users' collection on the 'name' field:
+  defradb client index create --collection Users --fields name --name UsersByName
+
+```
+defradb client index create -c --collection <collection> --fields <fields> [-n --name <name>] [flags]
+```
+
+### Options
+
+```
+  -c, --collection string   Collection name
+      --fields string       Fields to index
+  -h, --help                help for create
+  -n, --name string         Index name
+```
+
+### Options inherited from parent commands
+
+```
+      --logformat string     Log format to use. Options are csv, json (default "csv")
+      --logger stringArray   Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,...
+      --loglevel string      Log level to use. Options are debug, info, error, fatal (default "info")
+      --lognocolor           Disable colored log output
+      --logoutput string     Log output path (default "stderr")
+      --logtrace             Include stacktrace in error and fatal logs
+      --rootdir string       Directory for data and configuration to use (default: $HOME/.defradb)
+      --url string           URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
+
diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md
new file mode 100644
index 0000000000..f551fe4658
--- /dev/null
+++ b/docs/cli/defradb_client_index_drop.md
@@ -0,0 +1,40 @@
+## defradb client index drop
+
+Drop a collection's secondary index
+
+### Synopsis
+
+Drop a collection's secondary index.
+
+Example: drop the index 'UsersByName' for the 'Users' collection:
+  defradb client index drop --collection Users --name UsersByName
+
+```
+defradb client index drop -c --collection <collection> -n --name <name> [flags]
+```
+
+### Options
+
+```
+  -c, --collection string   Collection name
+  -h, --help                help for drop
+  -n, --name string         Index name
+```
+
+### Options inherited from parent commands
+
+```
+      --logformat string     Log format to use. Options are csv, json (default "csv")
+      --logger stringArray   Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,...
+      --loglevel string      Log level to use. 
Options are debug, info, error, fatal (default "info")
+      --lognocolor           Disable colored log output
+      --logoutput string     Log output path (default "stderr")
+      --logtrace             Include stacktrace in error and fatal logs
+      --rootdir string       Directory for data and configuration to use (default: $HOME/.defradb)
+      --url string           URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
+
diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md
new file mode 100644
index 0000000000..bf434d30f2
--- /dev/null
+++ b/docs/cli/defradb_client_index_list.md
@@ -0,0 +1,42 @@
+## defradb client index list
+
+Shows the list of indexes in the database or for a specific collection
+
+### Synopsis
+
+Shows the list of indexes in the database or for a specific collection.
+
+If the --collection flag is provided, only the indexes for that collection will be shown.
+Otherwise, all indexes in the database will be shown.
+
+Example: show all indexes for the 'Users' collection:
+  defradb client index list --collection Users
+
+```
+defradb client index list [-c --collection <collection>] [flags]
+```
+
+### Options
+
+```
+  -c, --collection string   Collection name
+  -h, --help                help for list
+```
+
+### Options inherited from parent commands
+
+```
+      --logformat string     Log format to use. Options are csv, json (default "csv")
+      --logger stringArray   Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,...
+      --loglevel string      Log level to use. Options are debug, info, error, fatal (default "info")
+      --lognocolor           Disable colored log output
+      --logoutput string     Log output path (default "stderr")
+      --logtrace             Include stacktrace in error and fatal logs
+      --rootdir string       Directory for data and configuration to use (default: $HOME/.defradb)
+      --url string           URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
+
diff --git a/docs/cli/defradb_client_peerid.md b/docs/cli/defradb_client_peerid.md
index 3c8bfe6d4e..f4596111c8 100644
--- a/docs/cli/defradb_client_peerid.md
+++ b/docs/cli/defradb_client_peerid.md
@@ -1,6 +1,10 @@
 ## defradb client peerid
 
-Get the PeerID of the DefraDB node
+Get the PeerID of the node
+
+### Synopsis
+
+Get the PeerID of the node. 
``` defradb client peerid [flags] @@ -27,5 +31,5 @@ defradb client peerid [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_ping.md b/docs/cli/defradb_client_ping.md index bcadf6a0a9..8edd7aff94 100644 --- a/docs/cli/defradb_client_ping.md +++ b/docs/cli/defradb_client_ping.md @@ -1,6 +1,6 @@ ## defradb client ping -Ping to test connection to a node +Ping to test connection with a node ``` defradb client ping [flags] @@ -27,5 +27,5 @@ defradb client ping [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index 5370ebbdab..8f5c3477c3 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -46,5 +46,5 @@ defradb client query [query request] [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_rpc.md b/docs/cli/defradb_client_rpc.md index 3acf2972a0..d7046433c5 100644 --- a/docs/cli/defradb_client_rpc.md +++ b/docs/cli/defradb_client_rpc.md @@ -1,15 +1,15 @@ ## defradb client rpc -Interact with a DefraDB gRPC server +Interact with a DefraDB node via RPC ### Synopsis -Interact with a DefraDB gRPC server. +Interact with a DefraDB node via RPC. ### Options ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") -h, --help help for rpc ``` @@ -28,7 +28,7 @@ Interact with a DefraDB gRPC server. ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_addreplicator.md b/docs/cli/defradb_client_rpc_addreplicator.md index 3c10e403b1..e80b667f18 100644 --- a/docs/cli/defradb_client_rpc_addreplicator.md +++ b/docs/cli/defradb_client_rpc_addreplicator.md @@ -5,7 +5,7 @@ Add a new replicator ### Synopsis Use this command if you wish to add a new target replicator -for the p2p data sync system. +for the P2P data sync system. ``` defradb client rpc addreplicator [flags] diff --git a/docs/cli/defradb_client_rpc_p2pcollection.md b/docs/cli/defradb_client_rpc_p2pcollection.md index e6886c0078..ede32521d4 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection.md +++ b/docs/cli/defradb_client_rpc_p2pcollection.md @@ -1,10 +1,11 @@ ## defradb client rpc p2pcollection -Interact with the P2P collection system +Configure the P2P collection system ### Synopsis -Add, delete, or get the list of P2P collections +Add, delete, or get the list of P2P collections. +The selected collections synchronize their events on the pubsub network. 
### Options @@ -15,7 +16,7 @@ Add, delete, or get the list of P2P collections ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,8 +29,8 @@ Add, delete, or get the list of P2P collections ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC * [defradb client rpc p2pcollection add](defradb_client_rpc_p2pcollection_add.md) - Add P2P collections * [defradb client rpc p2pcollection getall](defradb_client_rpc_p2pcollection_getall.md) - Get all P2P collections -* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Add P2P collections +* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Remove P2P collections diff --git a/docs/cli/defradb_client_rpc_p2pcollection_add.md b/docs/cli/defradb_client_rpc_p2pcollection_add.md index c0fb67c53b..92ac0d82e6 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_add.md +++ b/docs/cli/defradb_client_rpc_p2pcollection_add.md @@ -4,7 +4,8 @@ Add P2P collections ### Synopsis -Use this command if you wish to add new P2P collections to the pubsub topics +Add P2P collections to the synchronized pubsub topics. +The collections are synchronized between nodes of a pubsub network. ``` defradb client rpc p2pcollection add [collectionID] [flags] @@ -19,7 +20,7 @@ defradb client rpc p2pcollection add [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc p2pcollection add [collectionID] [flags] ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_getall.md b/docs/cli/defradb_client_rpc_p2pcollection_getall.md index c808c72f34..946a2e0156 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_getall.md +++ b/docs/cli/defradb_client_rpc_p2pcollection_getall.md @@ -4,7 +4,8 @@ Get all P2P collections ### Synopsis -Use this command if you wish to get all P2P collections in the pubsub topics +Get all P2P collections in the pubsub topics. +This is the list of collections of the node that are synchronized on the pubsub network. ``` defradb client rpc p2pcollection getall [flags] @@ -19,7 +20,7 @@ defradb client rpc p2pcollection getall [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. 
Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc p2pcollection getall [flags] ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_remove.md b/docs/cli/defradb_client_rpc_p2pcollection_remove.md index 985d21afc2..77658b4d50 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_remove.md +++ b/docs/cli/defradb_client_rpc_p2pcollection_remove.md @@ -1,10 +1,11 @@ ## defradb client rpc p2pcollection remove -Add P2P collections +Remove P2P collections ### Synopsis -Use this command if you wish to remove P2P collections from the pubsub topics +Remove P2P collections from the followed pubsub topics. +The removed collections will no longer be synchronized between nodes. ``` defradb client rpc p2pcollection remove [collectionID] [flags] @@ -19,7 +20,7 @@ defradb client rpc p2pcollection remove [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc p2pcollection remove [collectionID] [flags] ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_replicator.md b/docs/cli/defradb_client_rpc_replicator.md index 8d577b4c27..e88933791c 100644 --- a/docs/cli/defradb_client_rpc_replicator.md +++ b/docs/cli/defradb_client_rpc_replicator.md @@ -1,10 +1,11 @@ ## defradb client rpc replicator -Interact with the replicator system +Configure the replicator system ### Synopsis -Add, delete, or get the list of persisted replicators +Configure the replicator system. Add, delete, or get the list of persisted replicators. +A replicator replicates one or all collection(s) from one node to another. ### Options @@ -15,7 +16,7 @@ Add, delete, or get the list of persisted replicators ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,8 +29,8 @@ Add, delete, or get the list of persisted replicators ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC +* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator. 
It will stop synchronizing * [defradb client rpc replicator getall](defradb_client_rpc_replicator_getall.md) - Get all replicators * [defradb client rpc replicator set](defradb_client_rpc_replicator_set.md) - Set a P2P replicator diff --git a/docs/cli/defradb_client_rpc_replicator_delete.md b/docs/cli/defradb_client_rpc_replicator_delete.md index cb7182f01b..c851d2f508 100644 --- a/docs/cli/defradb_client_rpc_replicator_delete.md +++ b/docs/cli/defradb_client_rpc_replicator_delete.md @@ -1,11 +1,10 @@ ## defradb client rpc replicator delete -Delete a replicator +Delete a replicator. It will stop synchronizing ### Synopsis -Use this command if you wish to remove the target replicator - for the p2p data sync system. +Delete a replicator. It will stop synchronizing. ``` defradb client rpc replicator delete [-f, --full | -c, --collection] [flags] @@ -22,7 +21,7 @@ defradb client rpc replicator delete [-f, --full | -c, --collection] [fla ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -35,5 +34,5 @@ defradb client rpc replicator delete [-f, --full | -c, --collection] [fla ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_getall.md b/docs/cli/defradb_client_rpc_replicator_getall.md index 41f47d63fd..2449dba1fd 100644 --- a/docs/cli/defradb_client_rpc_replicator_getall.md +++ b/docs/cli/defradb_client_rpc_replicator_getall.md @@ -4,7 +4,8 @@ Get all replicators ### Synopsis -Use this command if you wish to get all the replicators for the p2p data sync system. +Get all the replicators active in the P2P data sync system. +These are the replicators that are currently replicating data from one node to another. ``` defradb client rpc replicator getall [flags] @@ -19,7 +20,7 @@ defradb client rpc replicator getall [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc replicator getall [flags] ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_set.md b/docs/cli/defradb_client_rpc_replicator_set.md index 1f94a34467..24b7add648 100644 --- a/docs/cli/defradb_client_rpc_replicator_set.md +++ b/docs/cli/defradb_client_rpc_replicator_set.md @@ -4,8 +4,9 @@ Set a P2P replicator ### Synopsis -Use this command if you wish to add a new target replicator - for the p2p data sync system or add schemas to an existing one +Add a new target replicator. 
+A replicator replicates one or all collection(s) from this node to another. + ``` defradb client rpc replicator set [-f, --full | -c, --collection] [flags] @@ -22,7 +23,7 @@ defradb client rpc replicator set [-f, --full | -c, --collection] [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -35,5 +36,5 @@ defradb client rpc replicator set [-f, --full | -c, --collection] [flags] ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index b19ff013b9..c36c8d4bce 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -1,10 +1,10 @@ ## defradb client schema -Interact with the schema system of a running DefraDB instance +Interact with the schema system of a DefraDB node ### Synopsis -Make changes, updates, or look for existing schema types to a DefraDB node. +Make changes, updates, or look for existing schema types. ### Options @@ -27,7 +27,9 @@ Make changes, updates, or look for existing schema types to a DefraDB node. ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client schema add](defradb_client_schema_add.md) - Add a new schema type to DefraDB +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client schema add](defradb_client_schema_add.md) - Add new schema +* [defradb client schema list](defradb_client_schema_list.md) - List schema types with their respective fields +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance * [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index 29c713bfbe..b278431034 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -1,10 +1,10 @@ ## defradb client schema add -Add a new schema type to DefraDB +Add new schema ### Synopsis -Add a new schema type to DefraDB. +Add new schema. Example: add from an argument string: defradb client schema add 'type Foo { ... }' @@ -43,5 +43,5 @@ defradb client schema add [schema] [flags] ### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_client_schema_list.md b/docs/cli/defradb_client_schema_list.md new file mode 100644 index 0000000000..ffbe253e31 --- /dev/null +++ b/docs/cli/defradb_client_schema_list.md @@ -0,0 +1,31 @@ +## defradb client schema list + +List schema types with their respective fields + +``` +defradb client schema list [flags] +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. 
Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node + diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md new file mode 100644 index 0000000000..0a20968378 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration.md @@ -0,0 +1,33 @@ +## defradb client schema migration + +Interact with the schema migration system of a running DefraDB instance + +### Synopsis + +Set or look for existing schema migrations on a DefraDB node. + +### Options + +``` + -h, --help help for migration +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client schema migration get](defradb_client_schema_migration_get.md) - Gets the schema migrations within DefraDB +* [defradb client schema migration set](defradb_client_schema_migration_set.md) - Set a schema migration within DefraDB + diff --git a/docs/cli/defradb_client_schema_migration_get.md b/docs/cli/defradb_client_schema_migration_get.md new file mode 100644 index 0000000000..d2164ed6bd --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_get.md @@ -0,0 +1,40 @@ +## defradb client schema migration get + +Gets the schema migrations within DefraDB + +### Synopsis + +Gets the schema migrations within the local DefraDB node. + +Example: + defradb client schema migration get + +Learn more about the DefraDB GraphQL Schema Language at https://docs.source.network. + +``` +defradb client schema migration get [flags] +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use.
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md new file mode 100644 index 0000000000..8013fd2a29 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -0,0 +1,47 @@ +## defradb client schema migration set + +Set a schema migration within DefraDB + +### Synopsis + +Set a migration between two schema versions within the local DefraDB node. + +Example: set from an argument string: + defradb client schema migration set bae123 bae456 '{"lenses": [...' + +Example: set from file: + defradb client schema migration set bae123 bae456 -f schema_migration.lens + +Example: set from stdin: + cat schema_migration.lens | defradb client schema migration set bae123 bae456 - + +Learn more about the DefraDB GraphQL Schema Language at https://docs.source.network. + +``` +defradb client schema migration set [src] [dst] [cfg] [flags] +``` + +### Options + +``` + -f, --file string Lens configuration file + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md index a70ea29517..ec64d293e0 100644 --- a/docs/cli/defradb_client_schema_patch.md +++ b/docs/cli/defradb_client_schema_patch.md @@ -6,7 +6,7 @@ Patch an existing schema type Patch an existing schema. -Uses JSON PATCH formatting as a DDL. +Uses JSON Patch to modify schema types. Example: patch from an argument string: defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' @@ -45,5 +45,5 @@ defradb client schema patch [schema] [flags] ### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_init.md b/docs/cli/defradb_init.md index 84a4bff742..f8d69f5794 100644 --- a/docs/cli/defradb_init.md +++ b/docs/cli/defradb_init.md @@ -5,6 +5,7 @@ Initialize DefraDB's root directory and configuration file ### Synopsis Initialize a directory for configuration and data at the given path. +Passed flags will be persisted in the stored configuration.
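A minimal sketch of that persistence behaviour, using only flags documented in the inherited options above (the values are illustrative):

```
# Whatever is passed here is persisted into the generated configuration file.
defradb init --rootdir ~/.defradb-dev --url localhost:9181
```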
``` defradb init [flags] diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md index 014f011ad2..d23b1fcacb 100644 --- a/docs/cli/defradb_start.md +++ b/docs/cli/defradb_start.md @@ -4,7 +4,7 @@ Start a DefraDB node ### Synopsis -Start a new instance of DefraDB node. +Start a DefraDB node. ``` defradb start [flags] diff --git a/docs/data_format_changes/i1243-enable-previously-skipped-explain-tests.md b/docs/data_format_changes/i1243-enable-previously-skipped-explain-tests.md new file mode 100644 index 0000000000..c63c7ddf5e --- /dev/null +++ b/docs/data_format_changes/i1243-enable-previously-skipped-explain-tests.md @@ -0,0 +1,5 @@ +# Enable Refactored Explain Tests That Were Always Skipped + +Previously, the explain tests were always skipped. Integrating the explain setup into the action-based testing +setup enabled them, but because they were previously skipped, the change detector keeps failing. This isn't a breaking +change. diff --git a/docs/data_format_changes/i1448-migration-engine.md b/docs/data_format_changes/i1448-migration-engine.md new file mode 100644 index 0000000000..aff5be3759 --- /dev/null +++ b/docs/data_format_changes/i1448-migration-engine.md @@ -0,0 +1,3 @@ +# Add lens migration engine to defra + +A new key-value entry was added to the datastore; it tracks the schema version of a datastore document and is required. If need be, it could be set to the latest schema version for all documents, but that would prevent migrating those records from their true version to that set version. \ No newline at end of file diff --git a/docs/data_format_changes/i1530-change-detector-without-non-mutations.md b/docs/data_format_changes/i1530-change-detector-without-non-mutations.md new file mode 100644 index 0000000000..602254206d --- /dev/null +++ b/docs/data_format_changes/i1530-change-detector-without-non-mutations.md @@ -0,0 +1,3 @@ +# Change detector without non-mutation actions + +The previous fix caused a regression in the change detector, and we need this documentation to break the cycle. diff --git a/docs/data_format_changes/i1602-no-change-tests-updated.md b/docs/data_format_changes/i1602-no-change-tests-updated.md new file mode 100644 index 0000000000..765f2261a5 --- /dev/null +++ b/docs/data_format_changes/i1602-no-change-tests-updated.md @@ -0,0 +1,3 @@ +# Rework transaction test framework capabilities + +This is not a breaking change; a test was split, which caused the change detector test case to change. \ No newline at end of file diff --git a/docs/data_format_changes/i1620-remove-first-crdt-byte.md b/docs/data_format_changes/i1620-remove-first-crdt-byte.md new file mode 100644 index 0000000000..7f605f9129 --- /dev/null +++ b/docs/data_format_changes/i1620-remove-first-crdt-byte.md @@ -0,0 +1,3 @@ +# Remove the first CRDT byte from field encoded values + +The first CRDT byte was legacy code and is no longer necessary, as this information is independently available via the client.FieldDescription: FieldDescription.Typ is the exact same value.
\ No newline at end of file diff --git a/go.mod b/go.mod index a5919c79bd..d935137715 100644 --- a/go.mod +++ b/go.mod @@ -1,58 +1,60 @@ module github.com/sourcenetwork/defradb -go 1.19 +go 1.20 require ( + github.com/bits-and-blooms/bitset v1.8.0 github.com/bxcodec/faker v2.0.1+incompatible github.com/dgraph-io/badger/v3 v3.2103.5 github.com/evanphx/json-patch/v5 v5.6.0 github.com/fxamacker/cbor/v2 v2.4.0 - github.com/go-chi/chi/v5 v5.0.8 + github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/cors v1.2.1 github.com/go-errors/errors v1.4.2 github.com/gofrs/uuid/v5 v5.0.0 - github.com/gogo/protobuf v1.3.2 github.com/graphql-go/graphql v0.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/iancoleman/strcase v0.2.0 - github.com/ipfs/boxo v0.8.1 + github.com/iancoleman/strcase v0.3.0 + github.com/ipfs/boxo v0.10.2 github.com/ipfs/go-block-format v0.1.2 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ipld-format v0.4.0 + github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/jbenet/goprocess v0.1.4 - github.com/libp2p/go-libp2p v0.27.1 + github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 + github.com/libp2p/go-libp2p v0.28.0 github.com/libp2p/go-libp2p-gostream v0.6.0 - github.com/libp2p/go-libp2p-kad-dht v0.23.0 + github.com/libp2p/go-libp2p-kad-dht v0.24.2 github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.9.0 + github.com/multiformats/go-multiaddr v0.10.1 github.com/multiformats/go-multibase v0.2.0 - github.com/multiformats/go-multihash v0.2.1 - github.com/multiformats/go-varint v0.0.7 + github.com/multiformats/go-multihash v0.2.3 github.com/pkg/errors v0.9.1 - github.com/sourcenetwork/immutable v0.2.2 + github.com/planetscale/vtprotobuf v0.4.0 + github.com/sourcenetwork/immutable v0.3.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.2 + github.com/spf13/viper v1.16.0 + github.com/stretchr/testify v1.8.4 github.com/textileio/go-libp2p-pubsub-rpc v0.0.9 github.com/tidwall/btree v1.6.0 github.com/ugorji/go/codec v1.2.11 github.com/valyala/fastjson v1.6.4 - go.opentelemetry.io/otel/metric v0.36.0 - go.opentelemetry.io/otel/sdk/metric v0.36.0 + go.opentelemetry.io/otel/metric v1.16.0 + go.opentelemetry.io/otel/sdk/metric v0.39.0 go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.9.0 - golang.org/x/net v0.10.0 - google.golang.org/grpc v1.55.0 + golang.org/x/crypto v0.11.0 + golang.org/x/net v0.12.0 + google.golang.org/grpc v1.56.2 + google.golang.org/protobuf v1.31.0 ) require ( - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -62,7 +64,7 @@ require ( github.com/cskr/pubsub v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -70,10 +72,11 @@ require ( github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // 
indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect @@ -81,15 +84,16 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v2.0.6+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect + github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hsanjuan/ipfs-lite v1.4.1 // indirect - github.com/huin/goupnp v1.1.0 // indirect + github.com/huin/goupnp v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitswap v0.12.0 // indirect @@ -103,7 +107,7 @@ require ( github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipld-cbor v0.0.6 // indirect - github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-libipfs v0.7.0 // indirect github.com/ipfs/go-merkledag v0.9.0 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect @@ -113,8 +117,8 @@ require ( github.com/ipld/go-ipld-prime v0.20.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.16.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect @@ -122,78 +126,78 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-connmgr v0.4.0 // indirect github.com/libp2p/go-libp2p-core v0.20.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.4.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-reuseport v0.3.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - 
github.com/miekg/dns v1.1.53 // indirect + github.com/miekg/dns v1.1.54 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multicodec v0.8.1 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect - github.com/onsi/ginkgo/v2 v2.9.2 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.9.7 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-19 v0.3.2 // indirect github.com/quic-go/qtls-go1-20 v0.2.2 // indirect github.com/quic-go/quic-go v0.33.0 // indirect - github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect + github.com/tetratelabs/wazero v1.3.1 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/sdk v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/dig v1.16.1 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/sdk v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.19.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // 
indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/sync v0.2.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/tools v0.9.1 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/protobuf v1.30.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect - nhooyr.io/websocket v1.8.7 // indirect + lukechampine.com/blake3 v1.2.1 // indirect ) // SourceNetwork fork og graphql-go diff --git a/go.sum b/go.sum index 8b9d753290..2fdb78dca4 100644 --- a/go.sum +++ b/go.sum @@ -76,13 +76,16 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= +github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -156,9 +159,9 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 
h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= @@ -214,14 +217,10 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= -github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= +github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -238,28 +237,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sql-driver/mysql 
v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -352,8 +338,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= -github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -377,6 +363,7 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= @@ -422,11 +409,11 @@ github.com/hsanjuan/ipfs-lite v1.4.1 h1:l+mnqk6wm2GiVJWn4u0UBtX+YqqA5cfsjX1ZujPx github.com/hsanjuan/ipfs-lite v1.4.1/go.mod h1:+c/L+PWf0l7DhmQF3cO2O3GBRQT/pUZrl86VG//O9Hk= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= 
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= -github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -436,8 +423,8 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.8.1 h1:3DkKBCK+3rdEB5t77WDShUXXhktYwH99mkAsgajsKrU= -github.com/ipfs/boxo v0.8.1/go.mod h1:xJ2hVb4La5WyD7GvKYE0lq2g1rmQZoCD2K4WNrV6aZI= +github.com/ipfs/boxo v0.10.2 h1:kspw9HmMyKzLQxpKk417sF69i6iuf50AXtRjFqCYyL4= +github.com/ipfs/boxo v0.10.2/go.mod h1:1qgKq45mPRCxf4ZPoJV2lnXxyxucigILMJOrQrVivv8= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= @@ -539,10 +526,10 @@ github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4uk github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= -github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= -github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= +github.com/ipfs/go-ipld-format v0.5.0 h1:WyEle9K96MSrvr47zZHKKcDxJ/vlpET6PSiQsAFO+Ds= +github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= +github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= +github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= github.com/ipfs/go-libipfs v0.7.0 h1:Mi54WJTODaOL2/ZSm5loi3SwI3jI2OuFWUrQIkJ5cpM= github.com/ipfs/go-libipfs v0.7.0/go.mod h1:KsIf/03CqhICzyRGyGo68tooiBE2iFbI/rXW7FhAYr0= @@ -582,7 +569,6 @@ 
github.com/ipfs/interface-go-ipfs-core v0.10.0 h1:b/psL1oqJcySdQAsIBfW5ZJJkOAsYl github.com/ipfs/interface-go-ipfs-core v0.10.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= -github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= @@ -609,9 +595,7 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -625,14 +609,12 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -649,8 
+631,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 h1:hC67vWtvuDnw8w6u4jLFoj3SOH92/4Lq8SCR++L7njw= +github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25/go.mod h1:rDE4oJUIAQoXX9heUg8VOQf5LscRWj0BeE5mbGqOs3E= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -678,8 +660,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.27.1 h1:k1u6RHsX3hqKnslDjsSgLNURxJ3O1atIZCY4gpMbbus= -github.com/libp2p/go-libp2p v0.27.1/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE= +github.com/libp2p/go-libp2p v0.28.0 h1:zO8cY98nJiPzZpFv5w5gqqb8aVzt4ukQ0nVOSaaKhJ8= +github.com/libp2p/go-libp2p v0.28.0/go.mod h1:s3Xabc9LSwOcnv9UD4nORnXKTsWkPMkIMB/JIGXVnzk= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= @@ -741,10 +723,10 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= -github.com/libp2p/go-libp2p-kad-dht v0.23.0 h1:sxE6LxLopp79eLeV695n7+c77V/Vn4AMF28AdM/XFqM= -github.com/libp2p/go-libp2p-kad-dht v0.23.0/go.mod h1:oO5N308VT2msnQI6qi5M61wzPmJYg7Tr9e16m5n7uDU= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-kad-dht v0.24.2 h1:zd7myKBKCmtZBhI3I0zm8xBkb28v3gmSEtQfBdAdFwc= +github.com/libp2p/go-libp2p-kad-dht v0.24.2/go.mod h1:BShPzRbK6+fN3hk8a0WGAYKpb8m4k+DtchkqouGTrSg= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= @@ -787,8 +769,8 @@ 
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7 github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing-helpers v0.4.0 h1:b7y4aixQ7AwbqYfcOQ6wTw8DQvuRZeTAA0Od3YYN5yc= -github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.0 h1:sirOYVD0wGWjkDwHZvinunIpaqPLBXkcnXApVHwZFGA= +github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= @@ -850,8 +832,8 @@ github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbx github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -865,8 +847,8 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= +github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= @@ -925,11 +907,10 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -940,8 +921,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= -github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -954,8 +935,9 @@ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -968,11 +950,9 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -996,8 +976,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= -github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= +github.com/multiformats/go-multiaddr v0.10.1 h1:HghtFrWyZEPrpTvgAMFJi6gFdgHfs2cb0pyfDsk+lqU= +github.com/multiformats/go-multiaddr v0.10.1/go.mod h1:jLEZsA61rwWNZQTHHnqq2HNa+4os/Hz54eqiRnsRqYQ= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1018,8 +998,8 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= -github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1027,8 +1007,8 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.0.1/go.mod 
h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= @@ -1064,15 +1044,15 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= -github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss= +github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1093,8 +1073,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1104,11 +1084,12 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY= +github.com/planetscale/vtprotobuf v0.4.0/go.mod 
h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= @@ -1128,8 +1109,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1155,8 +1136,8 @@ github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8G github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= -github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= -github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= +github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1216,19 +1197,19 @@ github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc/go.mod h1:3rOV6TxePSwADKpnwXBKpTjAA4QyjZBus13xc6VCtSw= github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23 h1:QcSWSYlE1alUC0uOO/trppYMLpR8OuFIL8IqR+PR5sA= github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23/go.mod h1:3Ty9EMes+aoxl8xS0CsuCGQZ4JEsOlC5yqQDLOKoBRw= -github.com/sourcenetwork/immutable v0.2.2 
h1:Qjz1cCWhgjS6YkUTWb53R22wSYMEZhzBghhEzWaFi8c= -github.com/sourcenetwork/immutable v0.2.2/go.mod h1:4jpxObkIQw8pvlIRm4ndZqf3pH9ZjYEw/UYI6GZDJww= +github.com/sourcenetwork/immutable v0.3.0 h1:gHPtGvLrTBTK5YpDAhMU+u+S8v1F6iYmc3nbZLryMdc= +github.com/sourcenetwork/immutable v0.3.0/go.mod h1:GD7ceuh/HD7z6cdIwzKK2ctzgZ1qqYFJpsFp+8qYnbI= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= @@ -1241,8 +1222,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1261,13 +1242,16 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tetratelabs/wazero v1.3.1 h1:rnb9FgOEQRLLR8tgoD1mfjNjMhFeWRUk+a4b4j/GpUM= +github.com/tetratelabs/wazero v1.3.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= github.com/textileio/go-datastore-extensions v1.0.1 h1:qIJGqJaigQ1wD4TdwS/hf73u0HChhXvvUSJuxBEKS+c= github.com/textileio/go-ds-badger3 v0.1.0 h1:q0kBuBmAcRUR3ClMSYlyw0224XeuzjjGinU53Qz1uXI= github.com/textileio/go-log/v2 v2.1.3-gke-2 h1:YkMA5ua0Cf/X6CkbexInsoJ/HdaHQBlgiv9Yy9hddNM= @@ -1275,9 +1259,7 @@ github.com/textileio/go-log/v2 v2.1.3-gke-2/go.mod h1:DwACkjFS3kjZZR/4Spx3aPfSsc github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1333,25 +1315,25 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q= -go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/sdk/metric v0.36.0 h1:dEXpkkOAEcHiRiaZdvd63MouV+3bCtAB/bF3jlNKnr8= -go.opentelemetry.io/otel/sdk/metric v0.36.0/go.mod h1:Lv4HQQPSCSkhyBKzLNtE8YhTSdK4HCwNh3lh7CiR20s= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= 
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= +go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= -go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -1404,9 +1386,9 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1417,8 +1399,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1444,8 +1426,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1500,8 +1482,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1525,8 +1508,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1608,12 +1591,12 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1623,8 +1606,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1691,16 +1675,16 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -1775,8 +1759,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1801,8 +1785,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= 
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1815,8 +1799,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1856,10 +1840,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/lens/fetcher.go b/lens/fetcher.go new file mode 100644 index 0000000000..bfd8fca3bc --- /dev/null +++ b/lens/fetcher.go @@ -0,0 +1,426 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+package lens
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/fxamacker/cbor/v2"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/datastore"
+	"github.com/sourcenetwork/defradb/db/fetcher"
+	"github.com/sourcenetwork/defradb/planner/mapper"
+)
+
+// todo: The code in here can be significantly simplified with:
+// https://github.com/sourcenetwork/defradb/issues/1589
+
+type lensedFetcher struct {
+	source   fetcher.Fetcher
+	registry client.LensRegistry
+	lens     Lens
+
+	txn datastore.Txn
+
+	col *client.CollectionDescription
+	// Cache the fieldDescriptions mapped by name to allow for cheaper access within the fetcher loop
+	fieldDescriptionsByName map[string]client.FieldDescription
+
+	targetVersionID string
+
+	// If true there are migrations registered for the collection being fetched.
+	hasMigrations bool
+}
+
+var _ fetcher.Fetcher = (*lensedFetcher)(nil)
+
+// NewFetcher returns a new fetcher that will migrate any documents from the given
+// source Fetcher as they are yielded.
+func NewFetcher(source fetcher.Fetcher, registry client.LensRegistry) fetcher.Fetcher {
+	return &lensedFetcher{
+		source:   source,
+		registry: registry,
+	}
+}
+
+func (f *lensedFetcher) Init(
+	ctx context.Context,
+	txn datastore.Txn,
+	col *client.CollectionDescription,
+	fields []client.FieldDescription,
+	filter *mapper.Filter,
+	docmapper *core.DocumentMapping,
+	reverse bool,
+	showDeleted bool,
+) error {
+	f.col = col
+
+	f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema.Fields))
+	// Cache the field descriptions in reverse, allowing smaller-index fields to overwrite any later
+	// ones. This should never really happen here, but it ensures the result is consistent with col.GetField
+	// which returns the first one it finds with a matching name.
+	for i := len(col.Schema.Fields) - 1; i >= 0; i-- {
+		field := col.Schema.Fields[i]
+		f.fieldDescriptionsByName[field.Name] = field
+	}
+
+	history, err := getTargetedSchemaHistory(ctx, txn, f.registry.Config(), f.col.Schema.SchemaID, f.col.Schema.VersionID)
+	if err != nil {
+		return err
+	}
+	f.lens = new(f.registry, f.col.Schema.VersionID, history)
+	f.txn = txn
+
+	for schemaVersionID := range history {
+		if f.registry.HasMigration(schemaVersionID) {
+			f.hasMigrations = true
+			break
+		}
+	}
+
+	f.targetVersionID = col.Schema.VersionID
+
+	var innerFetcherFields []client.FieldDescription
+	if f.hasMigrations {
+		// If there are migrations present, they may require fields that are not otherwise
+		// requested. At the moment this means we need to pass in nil so that the underlying
+		// fetcher fetches everything.
+		innerFetcherFields = nil
+	} else {
+		innerFetcherFields = fields
+	}
+	return f.source.Init(ctx, txn, col, innerFetcherFields, filter, docmapper, reverse, showDeleted)
+}
+
+func (f *lensedFetcher) Start(ctx context.Context, spans core.Spans) error {
+	return f.source.Start(ctx, spans)
+}
+
+func (f *lensedFetcher) FetchNext(ctx context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error) {
+	panic("This function is never called and is dead code. As this type is internal, panicking is okay for now")
+}
+
+func (f *lensedFetcher) FetchNextDecoded(
+	ctx context.Context,
+) (*client.Document, fetcher.ExecInfo, error) {
+	doc, execInfo, err := f.source.FetchNextDecoded(ctx)
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+
+	if doc == nil {
+		return nil, execInfo, nil
+	}
+
+	if !f.hasMigrations || doc.SchemaVersionID == f.targetVersionID {
+		// If there are no migrations registered for this schema, or if the document is already
+		// at the target schema version, no migration is required and we can return it early.
+		return doc, execInfo, nil
+	}
+
+	sourceLensDoc, err := clientDocToLensDoc(doc)
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+
+	err = f.lens.Put(doc.SchemaVersionID, sourceLensDoc)
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+
+	hasNext, err := f.lens.Next()
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+	if !hasNext {
+		// The migration decided not to yield a document, so we cycle through the next fetcher doc
+		doc, nextExecInfo, err := f.FetchNextDecoded(ctx)
+		execInfo.Add(nextExecInfo)
+		return doc, execInfo, err
+	}
+
+	migratedLensDoc, err := f.lens.Value()
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+
+	migratedDoc, err := f.lensDocToClientDoc(migratedLensDoc)
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+
+	err = f.updateDataStore(ctx, sourceLensDoc, migratedLensDoc)
+	if err != nil {
+		return nil, fetcher.ExecInfo{}, err
+	}
+
+	return migratedDoc, execInfo, nil
+}
+
+func (f *lensedFetcher) FetchNextDoc(
+	ctx context.Context,
+	mapping *core.DocumentMapping,
+) ([]byte, core.Doc, fetcher.ExecInfo, error) {
+	key, doc, execInfo, err := f.source.FetchNextDoc(ctx, mapping)
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+
+	if len(doc.Fields) == 0 {
+		return key, doc, execInfo, nil
+	}
+
+	if doc.SchemaVersionID == f.targetVersionID {
+		// If the document is already at the target schema version, no migration is required and
+		// we can return it early.
+		return key, doc, execInfo, nil
+	}
+
+	sourceLensDoc, err := coreDocToLensDoc(mapping, doc)
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+	err = f.lens.Put(doc.SchemaVersionID, sourceLensDoc)
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+
+	hasNext, err := f.lens.Next()
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+	if !hasNext {
+		// The migration decided not to yield a document, so we cycle through the next fetcher doc
+		key, doc, nextExecInfo, err := f.FetchNextDoc(ctx, mapping)
+		execInfo.Add(nextExecInfo)
+		return key, doc, execInfo, err
+	}
+
+	migratedLensDoc, err := f.lens.Value()
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+
+	migratedDoc, err := f.lensDocToCoreDoc(mapping, migratedLensDoc)
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+
+	err = f.updateDataStore(ctx, sourceLensDoc, migratedLensDoc)
+	if err != nil {
+		return nil, core.Doc{}, fetcher.ExecInfo{}, err
+	}
+
+	return key, migratedDoc, execInfo, nil
+}
+
+func (f *lensedFetcher) Close() error {
+	if f.lens != nil {
+		f.lens.Reset()
+	}
+	return f.source.Close()
+}
+
+// clientDocToLensDoc converts a client.Document to a LensDoc.
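+//
+// As an illustrative sketch (the field names are hypothetical; the key entry is
+// stored under whatever request.KeyFieldName resolves to), a fetched document
+// with `name` and `age` fields becomes something like:
+//
+//	map[string]any{request.KeyFieldName: "bae-...", "name": "Fred", "age": 21}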
+func clientDocToLensDoc(doc *client.Document) (LensDoc, error) {
+	docAsMap := map[string]any{}
+
+	for field, fieldValue := range doc.Values() {
+		docAsMap[field.Name()] = fieldValue.Value()
+	}
+	docAsMap[request.KeyFieldName] = doc.Key().String()
+
+	// Note: client.Document does not have a means of flagging as to whether it is
+	// deleted or not, and, currently, the fetcher never returns deleted items
+	// from the function that returns this type.
+
+	return docAsMap, nil
+}
+
+// coreDocToLensDoc converts a core.Doc to a LensDoc.
+func coreDocToLensDoc(mapping *core.DocumentMapping, doc core.Doc) (LensDoc, error) {
+	docAsMap := map[string]any{}
+
+	for fieldIndex, fieldValue := range doc.Fields {
+		fieldName, ok := mapping.TryToFindNameFromIndex(fieldIndex)
+		if !ok {
+			continue
+		}
+		docAsMap[fieldName] = fieldValue
+	}
+
+	docAsMap[request.DeletedFieldName] = doc.Status.IsDeleted()
+
+	return docAsMap, nil
+}
+
+// lensDocToCoreDoc converts a LensDoc to a core.Doc.
+func (f *lensedFetcher) lensDocToCoreDoc(mapping *core.DocumentMapping, docAsMap LensDoc) (core.Doc, error) {
+	doc := mapping.NewDoc()
+
+	for fieldName, fieldByteValue := range docAsMap {
+		if fieldName == request.KeyFieldName {
+			key, ok := fieldByteValue.(string)
+			if !ok {
+				return core.Doc{}, core.ErrInvalidKey
+			}
+
+			doc.SetKey(key)
+			continue
+		}
+
+		fieldDesc, fieldFound := f.fieldDescriptionsByName[fieldName]
+		if !fieldFound {
+			// Note: This can technically happen if a Lens migration returns a field that
+			// we do not know about. In which case we have to skip it.
+			continue
+		}
+
+		fieldValue, err := core.DecodeFieldValue(fieldDesc, fieldByteValue)
+		if err != nil {
+			return core.Doc{}, err
+		}
+
+		index := mapping.FirstIndexOfName(fieldName)
+		doc.Fields[index] = fieldValue
+	}
+
+	if value, ok := docAsMap[request.DeletedFieldName]; ok {
+		if wasDeleted, ok := value.(bool); ok {
+			if wasDeleted {
+				doc.Status = client.Deleted
+			} else {
+				doc.Status = client.Active
+			}
+		}
+	}
+
+	doc.SchemaVersionID = f.col.Schema.VersionID
+
+	return doc, nil
+}
+
+// lensDocToClientDoc converts a LensDoc to a client.Document.
+func (f *lensedFetcher) lensDocToClientDoc(docAsMap LensDoc) (*client.Document, error) {
+	key, err := client.NewDocKeyFromString(docAsMap[request.KeyFieldName].(string))
+	if err != nil {
+		return nil, err
+	}
+	doc := client.NewDocWithKey(key)
+
+	for fieldName, fieldByteValue := range docAsMap {
+		if fieldName == request.KeyFieldName {
+			continue
+		}
+
+		fieldDesc, fieldFound := f.fieldDescriptionsByName[fieldName]
+		if !fieldFound {
+			// Note: This can technically happen if a Lens migration returns a field that
+			// we do not know about. In which case we have to skip it.
+			continue
+		}
+
+		fieldValue, err := core.DecodeFieldValue(fieldDesc, fieldByteValue)
+		if err != nil {
+			return nil, err
+		}
+
+		err = doc.SetAs(fieldDesc.Name, fieldValue, fieldDesc.Typ)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	doc.SchemaVersionID = f.col.Schema.VersionID
+
+	// Note: client.Document does not have a means of flagging as to whether it is
+	// deleted or not, and, currently, the fetcher never returns deleted items
+	// from the function that returns this type.
+
+	return doc, nil
+}
+
+// updateDataStore updates the datastore with the migrated values.
+//
+// This removes the need to migrate a document every time it is fetched, as the second time around
+// the underlying fetcher will return the migrated values cached in the datastore instead of the
+// underlying dag store values.
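+//
+// Roughly, each field value changed by the migration is CBOR-encoded and written
+// back under the document's datastore key (a sketch of the loop below, with error
+// handling omitted):
+//
+//	bytes, _ := cbor.Marshal(value)
+//	fieldKey := datastoreKeyBase.WithFieldId(fieldDesc.ID.String())
+//	_ = f.txn.Datastore().Put(ctx, fieldKey.ToDS(), bytes)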
+func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string]any, migrated map[string]any) error {
+	modifiedFieldValuesByName := map[string]any{}
+	for name, originalValue := range original {
+		migratedValue, ok := migrated[name]
+		if !ok {
+			// If the field is present in the original, and missing from the migrated, it
+			// means that a migration has removed it, and we should set it to nil.
+			modifiedFieldValuesByName[name] = nil
+			continue
+		}
+
+		// Note: A deep equals check is required here, as the values may be inline-array slices.
+		// Todo: `reflect.DeepEqual` is pretty rubbish long-term here and should be replaced
+		// with something more defra specific: https://github.com/sourcenetwork/defradb/issues/1606
+		if !reflect.DeepEqual(originalValue, migratedValue) {
+			modifiedFieldValuesByName[name] = migratedValue
+		}
+	}
+
+	for name, migratedValue := range migrated {
+		if _, ok := original[name]; !ok {
+			// If a field has been added by a migration we need to make sure we
+			// preserve it here.
+			modifiedFieldValuesByName[name] = migratedValue
+			continue
+		}
+	}
+
+	dockey, ok := original[request.KeyFieldName].(string)
+	if !ok {
+		return core.ErrInvalidKey
+	}
+
+	datastoreKeyBase := core.DataStoreKey{
+		CollectionID: f.col.IDString(),
+		DocKey:       dockey,
+		InstanceType: core.ValueKey,
+	}
+
+	for fieldName, value := range modifiedFieldValuesByName {
+		fieldDesc, ok := f.fieldDescriptionsByName[fieldName]
+		if !ok {
+			// It may be that the migration has set fields that are unknown to us locally
+			// in which case we have to skip them for now.
+			continue
+		}
+		fieldKey := datastoreKeyBase.WithFieldId(fieldDesc.ID.String())
+
+		bytes, err := cbor.Marshal(value)
+		if err != nil {
+			return err
+		}
+
+		err = f.txn.Datastore().Put(ctx, fieldKey.ToDS(), bytes)
+		if err != nil {
+			return err
+		}
+	}
+
+	versionKey := datastoreKeyBase.WithFieldId(core.DATASTORE_DOC_VERSION_FIELD_ID)
+	err := f.txn.Datastore().Put(ctx, versionKey.ToDS(), []byte(f.targetVersionID))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/lens/history.go b/lens/history.go
new file mode 100644
index 0000000000..0b2a914d94
--- /dev/null
+++ b/lens/history.go
@@ -0,0 +1,279 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package lens
+
+import (
+	"context"
+
+	"github.com/ipfs/go-datastore/query"
+	"github.com/sourcenetwork/immutable"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/datastore"
+)
+
+// schemaHistoryLink represents an item in a particular schema's history; it
+// links to the previous and next version items if they exist.
+type schemaHistoryLink struct {
+	// The schema version id of this history item.
+	schemaVersionID string
+
+	// The history link to the next schema version, if there is one
+	// (for the most recent schema version this will be None).
+	next immutable.Option[*schemaHistoryLink]
+
+	// The history link to the previous schema version, if there is
+	// one (for the initial schema version this will be None).
+	previous immutable.Option[*schemaHistoryLink]
+}
+
+// targetedSchemaHistoryLink represents an item in a particular schema's history; it
+// links to the previous and next version items if they exist.
+//
+// It also contains a vector which describes the distance and direction to the
+// target schema version (given as an input param on construction).
+type targetedSchemaHistoryLink struct {
+	// The schema version id of this history item.
+	schemaVersionID string
+
+	// The link to next schema version, if there is one
+	// (for the most recent schema version this will be None).
+	next immutable.Option[*targetedSchemaHistoryLink]
+
+	// The link to the previous schema version, if there is
+	// one (for the initial schema version this will be None).
+	previous immutable.Option[*targetedSchemaHistoryLink]
+
+	// The distance and direction from this history item to the target.
+	//
+	// A zero value indicates that this is the target item. A positive value
+	// indicates that the target is more recent. A negative value indicates
+	// that the target predates this history item.
+	targetVector int
+}
+
+// getTargetedSchemaHistory returns the history of the schema of the given id, relative
+// to the given target schema version id.
+//
+// This includes any history items that are only known via registered
+// schema migrations.
+func getTargetedSchemaHistory(
+	ctx context.Context,
+	txn datastore.Txn,
+	lensConfigs []client.LensConfig,
+	schemaID string,
+	targetSchemaVersionID string,
+) (map[schemaVersionID]*targetedSchemaHistoryLink, error) {
+	history, err := getSchemaHistory(ctx, txn, lensConfigs, schemaID)
+	if err != nil {
+		return nil, err
+	}
+
+	result := map[schemaVersionID]*targetedSchemaHistoryLink{}
+
+	for _, item := range history {
+		result[item.schemaVersionID] = &targetedSchemaHistoryLink{
+			schemaVersionID: item.schemaVersionID,
+		}
+	}
+
+	for _, item := range result {
+		schemaHistoryLink := history[item.schemaVersionID]
+		nextHistoryItem := schemaHistoryLink.next
+		if !nextHistoryItem.HasValue() {
+			continue
+		}
+		nextItem := result[nextHistoryItem.Value().schemaVersionID]
+		item.next = immutable.Some(nextItem)
+		nextItem.previous = immutable.Some(item)
+	}
+
+	orphanSchemaVersions := map[string]struct{}{}
+
+	for schemaVersion, item := range result {
+		if item.schemaVersionID == targetSchemaVersionID {
+			continue
+		}
+		if item.targetVector != 0 {
+			continue
+		}
+
+		distanceTravelled := 0
+		currentItem := item
+		wasFound := false
+		for {
+			if !currentItem.next.HasValue() {
+				break
+			}
+
+			currentItem = currentItem.next.Value()
+			distanceTravelled++
+			if currentItem.targetVector != 0 {
+				distanceTravelled += currentItem.targetVector
+				wasFound = true
+				break
+			}
+			if currentItem.schemaVersionID == targetSchemaVersionID {
+				wasFound = true
+				break
+			}
+		}
+
+		if !wasFound {
+			// The target was not found going up the chain; try looking back.
+			// This is important for downgrading schema versions.
+			for {
+				if !currentItem.previous.HasValue() {
+					break
+				}
+
+				currentItem = currentItem.previous.Value()
+				distanceTravelled--
+				if currentItem.targetVector != 0 {
+					distanceTravelled += currentItem.targetVector
+					wasFound = true
+					break
+				}
+				if currentItem.schemaVersionID == targetSchemaVersionID {
+					wasFound = true
+					break
+				}
+			}
+		}
+
+		if !wasFound {
+			// This may happen if users define schema migrations to unknown schema versions
+			// with no migration path to known schema versions, essentially creating orphan
+			// migrations. These may become linked later and should remain persisted in the
+			// database, but we can drop them from the history here/now.
+			orphanSchemaVersions[schemaVersion] = struct{}{}
+			continue
+		}
+
+		item.targetVector = distanceTravelled
+	}
+
+	for schemaVersion := range orphanSchemaVersions {
+		delete(result, schemaVersion)
+	}
+
+	return result, nil
+}
+
+type schemaHistoryPairing struct {
+	schemaVersionID     string
+	nextSchemaVersionID string
+}
+
+// getSchemaHistory returns the history of the schema of the given id as a linked list
+// with each item mapped by schema version id.
+//
+// This includes any history items that are only known via registered
+// schema migrations.
+func getSchemaHistory(
+	ctx context.Context,
+	txn datastore.Txn,
+	lensConfigs []client.LensConfig,
+	schemaID string,
+) (map[schemaVersionID]*schemaHistoryLink, error) {
+	pairings := map[string]*schemaHistoryPairing{}
+
+	for _, config := range lensConfigs {
+		pairings[config.SourceSchemaVersionID] = &schemaHistoryPairing{
+			schemaVersionID:     config.SourceSchemaVersionID,
+			nextSchemaVersionID: config.DestinationSchemaVersionID,
+		}
+
+		if _, ok := pairings[config.DestinationSchemaVersionID]; !ok {
+			pairings[config.DestinationSchemaVersionID] = &schemaHistoryPairing{
+				schemaVersionID: config.DestinationSchemaVersionID,
+			}
+		}
+	}
+
+	prefix := core.NewSchemaHistoryKey(schemaID, "")
+	q, err := txn.Systemstore().Query(ctx, query.Query{
+		Prefix: prefix.ToString(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	for res := range q.Next() {
+		// check for Done on context first
+		select {
+		case <-ctx.Done():
+			// we've been cancelled! ;)
+			return nil, q.Close()
+		default:
+			// noop, just continue on with the for loop
+		}
+
+		if res.Error != nil {
+			err = q.Close()
+			if err != nil {
+				return nil, err
+			}
+			return nil, res.Error
+		}
+
+		key, err := core.NewSchemaHistoryKeyFromString(res.Key)
+		if err != nil {
+			err = q.Close()
+			if err != nil {
+				return nil, err
+			}
+			return nil, err
+		}
+
+		// The local schema version history takes priority over any migration-defined history
+		// and overwrites whatever already exists in the pairings (if any)
+		pairings[key.PreviousSchemaVersionID] = &schemaHistoryPairing{
+			schemaVersionID:     key.PreviousSchemaVersionID,
+			nextSchemaVersionID: string(res.Value),
+		}
+
+		if _, ok := pairings[string(res.Value)]; !ok {
+			pairings[string(res.Value)] = &schemaHistoryPairing{
+				schemaVersionID: string(res.Value),
+			}
+		}
+	}
+
+	err = q.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	history := map[schemaVersionID]*schemaHistoryLink{}
+
+	for _, pairing := range pairings {
+		// Convert the temporary types to the cleaner return type:
+		history[pairing.schemaVersionID] = &schemaHistoryLink{
+			schemaVersionID: pairing.schemaVersionID,
+		}
+	}
+
+	for _, pairing := range pairings {
+		src := history[pairing.schemaVersionID]
+
+		// Use the internal pairings to set the next/previous links. This must be
+		// done after the `history` map has been fully populated, else `src` and
+		// `next` may not yet have been added to the map.
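+		//
+		// As a sketch: pairings recording v1→v2 and v2→v3 produce the doubly-linked
+		// history v1 <-> v2 <-> v3, where v1.next is Some(v2), v3.previous is Some(v2),
+		// and the links at either end hold None.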
+		if next, hasNext := history[pairing.nextSchemaVersionID]; hasNext {
+			src.next = immutable.Some(next)
+			next.previous = immutable.Some(src)
+		}
+	}
+
+	return history, nil
+}
diff --git a/lens/lens.go b/lens/lens.go
new file mode 100644
index 0000000000..50549542b8
--- /dev/null
+++ b/lens/lens.go
@@ -0,0 +1,206 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package lens
+
+import (
+	"github.com/sourcenetwork/immutable/enumerable"
+
+	"github.com/sourcenetwork/defradb/client"
+)
+
+type schemaVersionID = string
+
+// LensDoc represents a document that will be sent to/from a Lens.
+type LensDoc = map[string]any
+
+type lensInput struct {
+	SchemaVersionID schemaVersionID
+	Doc             LensDoc
+}
+
+// Lens migrates items fed into it to the target schema version.
+//
+// Source documents may be of various schema versions, and may need to be migrated across multiple
+// versions. As the input versions are unknown until enumerated, the migration pipeline is constructed
+// lazily, as new source schema versions are discovered. If a migration does not exist for a schema
+// version, the document will be passed on to the next stage.
+type Lens interface {
+	enumerable.Enumerable[LensDoc]
+
+	// Put feeds the given document into the Lens, so that its transformed output may be yielded.
+	Put(schemaVersionID schemaVersionID, value LensDoc) error
+}
+
+type lens struct {
+	lensRegistry client.LensRegistry
+
+	// The primary access points to the lens pipes through which all things flow.
+	lensPipesBySchemaVersionIDs map[schemaVersionID]enumerable.Concatenation[LensDoc]
+
+	// The input pipes, into which items are added to the pipe system.
+	lensInputPipesBySchemaVersionIDs map[schemaVersionID]enumerable.Queue[LensDoc]
+
+	// The output pipe, through which all outputs must exit.
+	outputPipe enumerable.Concatenation[LensDoc]
+
+	schemaVersionHistory map[schemaVersionID]*targetedSchemaHistoryLink
+
+	source enumerable.Queue[lensInput]
+}
+
+var _ Lens = (*lens)(nil)
+
+func new(
+	lensRegistry client.LensRegistry,
+	targetSchemaVersionID schemaVersionID,
+	schemaVersionHistory map[schemaVersionID]*targetedSchemaHistoryLink,
+) Lens {
+	targetSource := enumerable.NewQueue[LensDoc]()
+	outputPipe := enumerable.Concat[LensDoc](targetSource)
+
+	return &lens{
+		lensRegistry:         lensRegistry,
+		source:               enumerable.NewQueue[lensInput](),
+		outputPipe:           outputPipe,
+		schemaVersionHistory: schemaVersionHistory,
+		lensInputPipesBySchemaVersionIDs: map[schemaVersionID]enumerable.Queue[LensDoc]{
+			targetSchemaVersionID: targetSource,
+		},
+		lensPipesBySchemaVersionIDs: map[schemaVersionID]enumerable.Concatenation[LensDoc]{
+			targetSchemaVersionID: outputPipe,
+		},
+	}
+}
+
+// todo: instead of this and a lens-fetcher, we could make lens-fetcher (and other fetchers)
+// enumerables and use those as the `source` directly.
+// https://github.com/sourcenetwork/defradb/issues/1589
+func (l *lens) Put(schemaVersionID schemaVersionID, value LensDoc) error {
+	return l.source.Put(lensInput{
+		SchemaVersionID: schemaVersionID,
+		Doc:             value,
+	})
+}
+
+// Next reads documents from source, and migrates them to the target schema version.
+//
+// Source documents may be of various schema versions, and may need to be migrated across multiple
+// versions. As the input versions are unknown until enumerated, the migration pipeline is constructed
+// lazily, as new source schema versions are discovered. If a migration does not exist for a schema
+// version, the document will be passed on to the next stage.
+//
+// Perhaps the best way to visualize this is as a multi-input marble-run, where inputs and their paths
+// are constructed as new marble types are discovered.
+//
+// - Each version can have one migration or none.
+// - Each migration in the document's path to the target version is guaranteed to receive the document
+// exactly once.
+// - Schema history is assumed to be a single straight line with no branching; this will be fixed by
+// https://github.com/sourcenetwork/defradb/issues/1598
+func (l *lens) Next() (bool, error) {
+	// Check the output pipe first; there could be items remaining within it, waiting to be yielded.
+	hasValue, err := l.outputPipe.Next()
+	if err != nil || hasValue {
+		return hasValue, err
+	}
+
+	hasValue, err = l.source.Next()
+	if err != nil || !hasValue {
+		return false, err
+	}
+
+	doc, err := l.source.Value()
+	if err != nil {
+		return false, err
+	}
+
+	var inputPipe enumerable.Queue[LensDoc]
+	if p, ok := l.lensInputPipesBySchemaVersionIDs[doc.SchemaVersionID]; ok {
+		// If the input pipe exists we can safely assume that it has been correctly connected
+		// up to the output via any intermediary pipes.
+		inputPipe = p
+	} else {
+		historyLocation := l.schemaVersionHistory[doc.SchemaVersionID]
+		var pipeHead enumerable.Enumerable[LensDoc]
+
+		for {
+			junctionPipe, junctionPreviouslyExisted := l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID]
+			if !junctionPreviouslyExisted {
+				versionInputPipe := enumerable.NewQueue[LensDoc]()
+				l.lensInputPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = versionInputPipe
+				if inputPipe == nil {
+					// The input pipe will be fed documents which are currently at this schema version.
+					inputPipe = versionInputPipe
+				}
+				// It is a source of the schemaVersion junction pipe; other schema versions
+				// may also join as sources to this junction pipe.
+				junctionPipe = enumerable.Concat[LensDoc](versionInputPipe)
+				l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = junctionPipe
+			}
+
+			// If we have previously laid pipe, we need to connect it to the current junction.
+			// This links a lens migration to the next stage.
+			if pipeHead != nil {
+				junctionPipe.Append(pipeHead)
+			}
+
+			if junctionPreviouslyExisted {
+				// If the junction pipe previously existed, then we can assume it is already connected to outputPipe
+				// via any intermediary pipes.
+				break
+			}
+
+			if historyLocation.targetVector > 0 {
+				// Acquire a lens migration from the registry, using the junctionPipe as its source.
+				// The new pipeHead will then be connected as a source to the next migration-stage on
+				// the next loop.
+				pipeHead, err = l.lensRegistry.MigrateUp(junctionPipe, historyLocation.schemaVersionID)
+				if err != nil {
+					return false, err
+				}
+
+				historyLocation = historyLocation.next.Value()
+			} else {
+				// The pipe head then becomes the schema version migration to the next version,
+				// sourcing from any documents at schemaVersionID, or lower schema versions.
+				// This also ensures each document only passes through each migration once,
+				// in order, and through the same state container (in case migrations use state).
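+				//
+				// Note: MigrateDown is currently a passthrough that returns the source
+				// unchanged (see https://github.com/sourcenetwork/defradb/issues/1591),
+				// so this branch presently only walks the junction pipes back towards
+				// the target version.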
+				pipeHead, err = l.lensRegistry.MigrateDown(junctionPipe, historyLocation.schemaVersionID)
+				if err != nil {
+					return false, err
+				}
+
+				// Step back towards the target schema version; the new pipeHead will be
+				// connected as a source to the next migration-stage on the next loop.
+				historyLocation = historyLocation.previous.Value()
+			}
+		}
+	}
+
+	// Place the current doc in the appropriate input pipe.
+	err = inputPipe.Put(doc.Doc)
+	if err != nil {
+		return false, err
+	}
+
+	// Then draw out the next result from the output pipe, pulling it through any migrations
+	// along the way. Typically this will be the (now migrated) document just placed into the input pipe.
+	return l.outputPipe.Next()
+}
+
+func (l *lens) Value() (LensDoc, error) {
+	return l.outputPipe.Value()
+}
+
+func (l *lens) Reset() {
+	l.outputPipe.Reset()
+}
diff --git a/lens/registry.go b/lens/registry.go
new file mode 100644
index 0000000000..aee26104ef
--- /dev/null
+++ b/lens/registry.go
@@ -0,0 +1,365 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package lens
+
+import (
+	"context"
+	"encoding/json"
+	"sync"
+
+	"github.com/ipfs/go-datastore/query"
+	"github.com/lens-vm/lens/host-go/config"
+	"github.com/lens-vm/lens/host-go/engine/module"
+	"github.com/lens-vm/lens/host-go/runtimes/wazero"
+	"github.com/sourcenetwork/immutable"
+	"github.com/sourcenetwork/immutable/enumerable"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/datastore"
+	"github.com/sourcenetwork/defradb/errors"
+)
+
+// todo: This file, particularly the `lensPool` stuff, contains fairly sensitive code that is both
+// cumbersome to fully test with integration/benchmark tests, and can have a significant effect on
+// the users if broken (deadlocks, large performance degradation). It should have proper unit tests.
+// https://github.com/sourcenetwork/defradb/issues/1596
+
+// lensRegistry is responsible for managing all migration-related state within a local
+// database instance.
+type lensRegistry struct {
+	poolSize int
+
+	// The runtime used to execute lens wasm modules.
+	runtime module.Runtime
+
+	// The modules by file path used to instantiate lens wasm module instances.
+	modulesByPath map[string]module.Module
+	moduleLock    sync.Mutex
+
+	lensPoolsBySchemaVersionID map[string]*lensPool
+
+	// lens configurations by source schema version ID
+	configs map[string]client.LensConfig
+}
+
+var _ client.LensRegistry = (*lensRegistry)(nil)
+
+// DefaultPoolSize is the default size of the lens pool for each schema version.
+const DefaultPoolSize int = 5
+
+// NewRegistry instantiates a new registry.
+//
+// Each lens pool will be of size 5 (per schema version) if a size is not provided.
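+//
+// A minimal usage sketch (the pool size of 10 is an arbitrary example value):
+//
+//	registry := NewRegistry(immutable.Some(10))     // ten cached lens pipes per schema version
+//	defaulted := NewRegistry(immutable.None[int]()) // falls back to DefaultPoolSize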
+func NewRegistry(lensPoolSize immutable.Option[int]) *lensRegistry {
+	var size int
+	if lensPoolSize.HasValue() {
+		size = lensPoolSize.Value()
+	} else {
+		size = DefaultPoolSize
+	}
+
+	return &lensRegistry{
+		poolSize:                   size,
+		runtime:                    wazero.New(),
+		modulesByPath:              map[string]module.Module{},
+		lensPoolsBySchemaVersionID: map[string]*lensPool{},
+		configs:                    map[string]client.LensConfig{},
+	}
+}
+
+func (r *lensRegistry) SetMigration(ctx context.Context, txn datastore.Txn, cfg client.LensConfig) error {
+	key := core.NewSchemaVersionMigrationKey(cfg.SourceSchemaVersionID)
+
+	json, err := json.Marshal(cfg)
+	if err != nil {
+		return err
+	}
+
+	err = txn.Systemstore().Put(ctx, key.ToDS(), json)
+	if err != nil {
+		return err
+	}
+
+	err = r.cacheLens(txn, cfg)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *lensRegistry) cacheLens(txn datastore.Txn, cfg client.LensConfig) error {
+	locker, lockerAlreadyExists := r.lensPoolsBySchemaVersionID[cfg.SourceSchemaVersionID]
+	if !lockerAlreadyExists {
+		locker = r.newPool(r.poolSize, cfg)
+	}
+
+	newLensPipes := make([]*lensPipe, r.poolSize)
+	for i := 0; i < r.poolSize; i++ {
+		var err error
+		newLensPipes[i], err = r.newLensPipe(cfg)
+		if err != nil {
+			return err
+		}
+	}
+
+	// todo: handling txns like this means that the migrations are not available within the current
+	// transaction if used (e.g. by GQL requests) before commit.
+	// https://github.com/sourcenetwork/defradb/issues/1592
+	txn.OnSuccess(func() {
+		if !lockerAlreadyExists {
+			r.lensPoolsBySchemaVersionID[cfg.SourceSchemaVersionID] = locker
+		}
+
+	drainLoop:
+		for {
+			select {
+			case <-locker.pipes:
+			default:
+				break drainLoop
+			}
+		}
+
+		for _, lensPipe := range newLensPipes {
+			locker.returnLens(lensPipe)
+		}
+
+		r.configs[cfg.SourceSchemaVersionID] = cfg
+	})
+
+	return nil
+}
+
+func (r *lensRegistry) ReloadLenses(ctx context.Context, txn datastore.Txn) error {
+	prefix := core.NewSchemaVersionMigrationKey("")
+	q, err := txn.Systemstore().Query(ctx, query.Query{
+		Prefix: prefix.ToString(),
+	})
+	if err != nil {
+		return err
+	}
+
+	for res := range q.Next() {
+		// check for Done on context first
+		select {
+		case <-ctx.Done():
+			// we've been cancelled! ;)
+			err = q.Close()
+			if err != nil {
+				return err
+			}
+
+			return nil
+		default:
+			// noop, just continue on with the for loop
+		}
+
+		if res.Error != nil {
+			err = q.Close()
+			if err != nil {
+				return errors.Wrap(err.Error(), res.Error)
+			}
+			return res.Error
+		}
+
+		var cfg client.LensConfig
+		err = json.Unmarshal(res.Value, &cfg)
+		if err != nil {
+			err = q.Close()
+			if err != nil {
+				return err
+			}
+			return err
+		}
+
+		err = r.cacheLens(txn, cfg)
+		if err != nil {
+			err = q.Close()
+			if err != nil {
+				return errors.Wrap(err.Error(), res.Error)
+			}
+			return err
+		}
+	}
+
+	err = q.Close()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *lensRegistry) MigrateUp(
+	src enumerable.Enumerable[LensDoc],
+	schemaVersionID string,
+) (enumerable.Enumerable[LensDoc], error) {
+	lensPool, ok := r.lensPoolsBySchemaVersionID[schemaVersionID]
+	if !ok {
+		// If there are no migrations for this schema version, just return the given source.
+		return src, nil
+	}
+
+	lens, err := lensPool.borrow()
+	if err != nil {
+		return nil, err
+	}
+
+	lens.SetSource(src)
+
+	return lens, nil
+}
+
+func (*lensRegistry) MigrateDown(
+	src enumerable.Enumerable[LensDoc],
+	schemaVersionID string,
+) (enumerable.Enumerable[LensDoc], error) {
+	// todo: https://github.com/sourcenetwork/defradb/issues/1591
+	return src, nil
+}
+
+func (r *lensRegistry) Config() []client.LensConfig {
+	result := []client.LensConfig{}
+	for _, cfg := range r.configs {
+		result = append(result, cfg)
+	}
+	return result
+}
+
+func (r *lensRegistry) HasMigration(schemaVersionID string) bool {
+	_, hasMigration := r.lensPoolsBySchemaVersionID[schemaVersionID]
+	return hasMigration
+}
+
+// lensPool provides a pool-like mechanism for caching a limited number of wasm lens modules in
+// a thread-safe fashion.
+//
+// Instantiating a lens module is pretty expensive, as it has to spin up the wasm runtime environment,
+// so we need to limit how frequently we do this.
+type lensPool struct {
+	// The config used to create the lenses within this locker.
+	cfg client.LensConfig
+
+	registry *lensRegistry
+
+	// Using a buffered channel provides an easy way to manage a finite
+	// number of lenses.
+	//
+	// We wish to limit this as creating lenses is expensive, and we do not want
+	// to be dynamically resizing this collection and spinning up new lens instances
+	// in user time, or holding on to large numbers of them.
+	pipes chan *lensPipe
+}
+
+func (r *lensRegistry) newPool(lensPoolSize int, cfg client.LensConfig) *lensPool {
+	return &lensPool{
+		cfg:      cfg,
+		registry: r,
+		pipes:    make(chan *lensPipe, lensPoolSize),
+	}
+}
+
+// borrow attempts to borrow a module from the locker; if one is not available,
+// it will return a new, temporary instance that will not be returned to the locker
+// after use.
+func (l *lensPool) borrow() (enumerable.Socket[LensDoc], error) {
+	select {
+	case lens := <-l.pipes:
+		return &borrowedEnumerable{
+			source: lens,
+			pool:   l,
+		}, nil
+	default:
+		// If there are no free cached migrations within the locker, create a new temporary one
+		// instead of blocking.
+		return l.registry.newLensPipe(l.cfg)
+	}
+}
+
+// returnLens returns a borrowed module to the locker, allowing it to be reused by other contexts.
+func (l *lensPool) returnLens(lens *lensPipe) {
+	l.pipes <- lens
+}
+
+// borrowedEnumerable is an enumerable tied to a locker.
+//
+// It exposes the source enumerable and amends the Reset function so that when called, the source
+// pipe is returned to the locker.
+type borrowedEnumerable struct {
+	source *lensPipe
+	pool   *lensPool
+}
+
+var _ enumerable.Socket[LensDoc] = (*borrowedEnumerable)(nil)
+
+func (s *borrowedEnumerable) SetSource(newSource enumerable.Enumerable[LensDoc]) {
+	s.source.SetSource(newSource)
+}
+
+func (s *borrowedEnumerable) Next() (bool, error) {
+	return s.source.Next()
+}
+
+func (s *borrowedEnumerable) Value() (LensDoc, error) {
+	return s.source.Value()
+}
+
+func (s *borrowedEnumerable) Reset() {
+	s.pool.returnLens(s.source)
+	s.source.Reset()
+}
+
+// lensPipe provides a mechanism by which the underlying wasm module can be hidden from consumers,
+// allowing input sources to be swapped in and out as different actors borrow it from the locker.
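+//
+// A rough borrow/use/return cycle (a sketch; `pool` and `src` are assumed to exist):
+//
+//	pipe, _ := pool.borrow()  // a cached lensPipe, or a temporary one if the pool is empty
+//	pipe.SetSource(src)       // swap the caller's document source into the wasm pipeline
+//	hasNext, _ := pipe.Next() // pull documents through the migration
+//	pipe.Reset()              // a borrowed pipe is handed back to the pool on Reset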
+type lensPipe struct {
+	input      enumerable.Socket[LensDoc]
+	enumerable enumerable.Enumerable[LensDoc]
+}
+
+var _ enumerable.Socket[LensDoc] = (*lensPipe)(nil)
+
+func (r *lensRegistry) newLensPipe(cfg client.LensConfig) (*lensPipe, error) {
+	socket := enumerable.NewSocket[LensDoc]()
+
+	r.moduleLock.Lock()
+	enumerable, err := config.LoadInto[LensDoc, LensDoc](r.runtime, r.modulesByPath, cfg.Lens, socket)
+	r.moduleLock.Unlock()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &lensPipe{
+		input:      socket,
+		enumerable: enumerable,
+	}, nil
+}
+
+func (p *lensPipe) SetSource(newSource enumerable.Enumerable[LensDoc]) {
+	p.input.SetSource(newSource)
+}
+
+func (p *lensPipe) Next() (bool, error) {
+	return p.enumerable.Next()
+}
+
+func (p *lensPipe) Value() (LensDoc, error) {
+	return p.enumerable.Value()
+}
+
+func (p *lensPipe) Reset() {
+	p.input.Reset()
+	// WARNING: Currently the wasm module state is not reset by calling reset on the enumerable;
+	// this means that state from one context may leak to the next usage. There is a ticket here
+	// to fix this: https://github.com/lens-vm/lens/issues/46
+	p.enumerable.Reset()
+}
diff --git a/licenses/BSL.txt b/licenses/BSL.txt
index 54cfbb9150..093935cc02 100644
--- a/licenses/BSL.txt
+++ b/licenses/BSL.txt
@@ -7,7 +7,7 @@ Parameters
 
 Licensor:             Democratized Data (D2) Foundation
 
-Licensed Work:        DefraDB v0.5.1
+Licensed Work:        DefraDB v0.6.0
 
                       The Licensed Work is (c) 2023 D2 Foundation.
 
@@ -28,7 +28,7 @@ Additional Use Grant: You may only use the Licensed Work for the
 
-Change Date:          2027-05-16
+Change Date:          2027-07-31
 
 Change License:       Apache License, Version 2.0
 
diff --git a/logging/config.go b/logging/config.go
index b6ccccc50d..63cde2ceb5 100644
--- a/logging/config.go
+++ b/logging/config.go
@@ -110,7 +110,7 @@ type Config struct {
 	OutputPaths           []string
 	OverridesByLoggerName map[string]Config
 
-	pipe io.Writer // this is used for testing purposes only
+	Pipe io.Writer // this is used for testing purposes only
 }
 
 func (c Config) forLogger(name string) Config {
@@ -121,7 +121,7 @@ func (c Config) forLogger(name string) Config {
 		EnableCaller:     c.EnableCaller,
 		EncoderFormat:    c.EncoderFormat,
 		OutputPaths:      c.OutputPaths,
-		pipe:             c.pipe,
+		Pipe:             c.Pipe,
 	}
 
 	if override, hasOverride := c.OverridesByLoggerName[name]; hasOverride {
@@ -143,8 +143,8 @@ func (c Config) forLogger(name string) Config {
 		if len(override.OutputPaths) != 0 {
 			loggerConfig.OutputPaths = override.OutputPaths
 		}
-		if override.pipe != nil {
-			loggerConfig.pipe = override.pipe
+		if override.Pipe != nil {
+			loggerConfig.Pipe = override.Pipe
 		}
 	}
 
@@ -161,7 +161,7 @@ func (c Config) copy() Config {
 			EnableCaller:  o.EnableCaller,
 			DisableColor:  o.DisableColor,
 			OutputPaths:   o.OutputPaths,
-			pipe:          o.pipe,
+			Pipe:          o.Pipe,
 		}
 	}
 
@@ -173,7 +173,7 @@ func (c Config) copy() Config {
 		EnableCaller:          c.EnableCaller,
 		DisableColor:          c.DisableColor,
 		OverridesByLoggerName: overridesByLoggerName,
-		pipe:                  c.pipe,
+		Pipe:                  c.Pipe,
 	}
 }
 
@@ -205,8 +205,8 @@ func (oldConfig Config) with(newConfigOptions Config) Config {
 		newConfig.OutputPaths = validatePaths(newConfigOptions.OutputPaths)
 	}
 
-	if newConfigOptions.pipe != nil {
-		newConfig.pipe = newConfigOptions.pipe
+	if newConfigOptions.Pipe != nil {
+		newConfig.Pipe = newConfigOptions.Pipe
 	}
 
 	for k, o := range newConfigOptions.OverridesByLoggerName {
@@ -219,7 +219,7 @@
 			DisableColor:  o.DisableColor,
 			EncoderFormat: o.EncoderFormat,
 			OutputPaths:   validatePaths(o.OutputPaths),
-			pipe:          o.pipe,
+			Pipe:          o.Pipe,
 		}
 	}
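The logging diffs here export the previously package-private `pipe` field as `Pipe`, so that packages outside `logging` can capture log output in tests. A minimal sketch of the intended use, modelled on the updated tests further below (the `GetGoLogger` constructor and logger name are assumptions inferred from the test names in this diff):

	b := &bytes.Buffer{}
	l := logging.GetGoLogger("example")
	l.ApplyConfig(logging.Config{
		EncoderFormat: logging.NewEncoderFormatOption(logging.JSON),
		Pipe:          b, // log output is mirrored into the buffer for assertions
	})
	l.ZapEventLogger.Info("some info")
	// b.String() now holds the JSON-encoded log line.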
diff --git a/logging/logger.go b/logging/logger.go index cb416e3876..9b9bb20e35 100644 --- a/logging/logger.go +++ b/logging/logger.go @@ -172,8 +172,8 @@ func (l *logger) ApplyConfig(config Config) { l.logger = newLogger if !willOutputToStderrOrStdout(config.OutputPaths) { - if config.pipe != nil { // for testing purposes only - l.consoleLogger = stdlog.New(config.pipe, "", 0) + if config.Pipe != nil { // for testing purposes only + l.consoleLogger = stdlog.New(config.Pipe, "", 0) } else { l.consoleLogger = stdlog.New(os.Stderr, "", 0) } @@ -245,7 +245,7 @@ func buildZapLogger(name string, config Config) (*zap.Logger, error) { return nil, err } - if willOutputToStderrOrStdout(defaultConfig.OutputPaths) && config.pipe != nil { + if willOutputToStderrOrStdout(defaultConfig.OutputPaths) && config.Pipe != nil { newLogger = newLogger.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { cfg := zap.NewProductionEncoderConfig() cfg.ConsoleSeparator = defaultConfig.EncoderConfig.ConsoleSeparator @@ -253,7 +253,7 @@ func buildZapLogger(name string, config Config) (*zap.Logger, error) { cfg.EncodeLevel = defaultConfig.EncoderConfig.EncodeLevel return zapcore.NewCore( zapcore.NewJSONEncoder(cfg), - zapcore.AddSync(config.pipe), + zapcore.Lock(zapcore.AddSync(config.Pipe)), zap.NewAtomicLevelAt(zapcore.Level(config.Level.LogLevel)), ) })) diff --git a/logging/logging_test.go b/logging/logging_test.go index 9ffb7d66d6..0c776ffa33 100644 --- a/logging/logging_test.go +++ b/logging/logging_test.go @@ -379,7 +379,7 @@ func TestLogDoesntWriteMessagesToLogGivenNoLogPath(t *testing.T) { logger, _ := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(tc.LogLevel) c.OutputPaths = []string{} - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -416,7 +416,7 @@ func TestLogDoesntWriteMessagesToLogGivenNotFoundLogPath(t *testing.T) { logger, _ := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(tc.LogLevel) c.OutputPaths = []string{"/path/not/found"} - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -453,7 +453,7 @@ func TestLogDoesntWriteMessagesToLogGivenStderrLogPath(t *testing.T) { logger, _ := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(tc.LogLevel) c.OutputPaths = []string{stderr} - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -568,7 +568,7 @@ func TestLogWritesMessagesToFeedbackLog(t *testing.T) { c.Level = NewLogLevelOption(tc.LogLevel) c.EnableStackTrace = NewEnableStackTraceOption(tc.WithStackTrace) c.EnableCaller = NewEnableCallerOption(tc.WithCaller) - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -613,7 +613,7 @@ func TestLogWritesMessagesToLogGivenPipeWithValidPath(t *testing.T) { b := &bytes.Buffer{} logger, logPath := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(Info) - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -874,7 +874,7 @@ func TestGetGoLoggerAndApplyConfig(t *testing.T) { b := &bytes.Buffer{} l.ApplyConfig(Config{ EncoderFormat: NewEncoderFormatOption(JSON), - pipe: b, + Pipe: b, }) l.ZapEventLogger.Info("some info") @@ -906,7 +906,7 @@ func TestGetGoLoggerV2AndApplyConfig(t *testing.T) { b := &bytes.Buffer{} l.ApplyConfig(Config{ EncoderFormat: NewEncoderFormatOption(JSON), - pipe: b, + Pipe: b, }) l.ZapEventLogger.Info("some info") diff --git a/merkle/clock/clock_test.go b/merkle/clock/clock_test.go index 2624335bd8..8cee13a2bb 100644 --- a/merkle/clock/clock_test.go +++ b/merkle/clock/clock_test.go @@ -16,9 +16,9 @@ import ( cid "github.com/ipfs/go-cid" 
ds "github.com/ipfs/go-datastore" - mh "github.com/multiformats/go-multihash" "github.com/sourcenetwork/defradb/core" + ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/core/crdt" "github.com/sourcenetwork/defradb/datastore" ) @@ -79,15 +79,7 @@ func TestMerkleClockPutBlockWithHeads(t *testing.T) { delta := &crdt.LWWRegDelta{ Data: []byte("test"), } - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - - // And then feed it some data - c, err := pref.Sum([]byte("Hello World!")) + c, err := ccid.NewSHA256CidV1([]byte("Hello World!")) if err != nil { t.Error("Failed to create new head CID:", err) return diff --git a/merkle/clock/heads_test.go b/merkle/clock/heads_test.go index c9c6212c5c..a857571515 100644 --- a/merkle/clock/heads_test.go +++ b/merkle/clock/heads_test.go @@ -20,26 +20,19 @@ import ( "testing" "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" "github.com/sourcenetwork/defradb/core" + ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" ) func newRandomCID() cid.Cid { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - // And then feed it some data bs := make([]byte, 4) i := rand.Uint32() binary.LittleEndian.PutUint32(bs, i) - c, err := pref.Sum(bs) + c, err := ccid.NewSHA256CidV1(bs) if err != nil { return cid.Undef } diff --git a/merkle/clock/ipld.go b/merkle/clock/ipld.go index e982ced1ee..484a145dce 100644 --- a/merkle/clock/ipld.go +++ b/merkle/clock/ipld.go @@ -27,10 +27,6 @@ import ( var _ core.NodeGetter = (*CrdtNodeGetter)(nil) -func init() { - ipld.Register(cid.DagProtobuf, dag.DecodeProtobufBlock) -} - type DeltaExtractorFn func(ipld.Node) (core.Delta, error) // crdtNodeGetter wraps an ipld.NodeGetter with some additional utility methods diff --git a/metric/metric.go b/metric/metric.go index f8eb82d699..f267a7ed5d 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -20,9 +20,8 @@ import ( "encoding/json" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" otelMetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" ) var _ Metric = (*Meter)(nil) @@ -35,7 +34,7 @@ type Metric interface { Register(name string) // Dump is responsible to read the metrics and output all the gathered data. - Dump(ctx context.Context) (any, error) + Dump(ctx context.Context) (*metricdata.ResourceMetrics, error) // Close shutsdown the meter. Close(ctx context.Context) error @@ -61,8 +60,12 @@ func (m *Meter) Register(name string) { } // Dump is responsible to read the metrics and output all the gathered data. -func (m *Meter) Dump(ctx context.Context) (any, error) { - return m.reader.Collect(ctx) +func (m *Meter) Dump(ctx context.Context) (*metricdata.ResourceMetrics, error) { + out := &metricdata.ResourceMetrics{} + if err := m.reader.Collect(ctx, out); err != nil { + return nil, err + } + return out, nil } // Close shutsdown the meter. @@ -73,33 +76,33 @@ func (m *Meter) Close(ctx context.Context) error { // GetSyncHistogram returns a new histogram with the given name and unit. 
func (m *Meter) GetSyncHistogram( name string, - unit unit.Unit, -) (instrument.Int64Histogram, error) { + unit string, +) (metric.Int64Histogram, error) { return m.meter.Int64Histogram( name, - instrument.WithUnit(unit), + metric.WithUnit(unit), ) } // GetSyncCounter returns a new counter with the given name and unit. func (m *Meter) GetSyncCounter( name string, - unit unit.Unit, -) (instrument.Int64Counter, error) { + unit string, +) (metric.Int64Counter, error) { return m.meter.Int64Counter( name, - instrument.WithUnit(unit), + metric.WithUnit(unit), ) } // DumpScopeMetricsString returns a string representation of the metrics. func (m *Meter) DumpScopeMetricsString(ctx context.Context) (string, error) { - data, err := m.reader.Collect(ctx) - if err != nil { + out := &metricdata.ResourceMetrics{} + if err := m.reader.Collect(ctx, out); err != nil { return "", err } - jsonBytes, err := json.MarshalIndent(data.ScopeMetrics, "", " ") + jsonBytes, err := json.MarshalIndent(out.ScopeMetrics, "", " ") if err != nil { return "", err } diff --git a/metric/metric_test.go b/metric/metric_test.go index a8af66414d..b89700fd74 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -16,7 +16,6 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/otel/metric/unit" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -25,7 +24,7 @@ func TestMetricSyncHistogram(t *testing.T) { meter.Register("HistogramOnly") workDuration, err := meter.GetSyncHistogram( "workDuration", - unit.Milliseconds, + "ms", ) if err != nil { t.Error(err) @@ -46,30 +45,25 @@ func TestMetricSyncHistogram(t *testing.T) { // Goes in third bucket. workDuration.Record(ctx, elapsedTime.Nanoseconds()) - dump, err := meter.Dump(ctx) + data, err := meter.Dump(ctx) if err != nil { t.Error(err) } - data, isMatricData := dump.(metricdata.ResourceMetrics) - if !isMatricData { - t.Error(err) - } - assert.Equal(t, 1, len(data.ScopeMetrics)) assert.Equal(t, "HistogramOnly", data.ScopeMetrics[0].Scope.Name) assert.Equal(t, 1, len(data.ScopeMetrics[0].Metrics)) assert.Equal(t, "workDuration", data.ScopeMetrics[0].Metrics[0].Name) firstMetricData := data.ScopeMetrics[0].Metrics[0].Data - histData, isHistData := firstMetricData.(metricdata.Histogram) + histData, isHistData := firstMetricData.(metricdata.Histogram[int64]) if !isHistData { t.Error(err) } assert.Equal(t, 1, len(histData.DataPoints)) assert.Equal(t, uint64(3), histData.DataPoints[0].Count) - assert.Equal(t, 12.0, histData.DataPoints[0].Sum) // 2 + 4 + 6 + assert.Equal(t, int64(12), histData.DataPoints[0].Sum) // 2 + 4 + 6 assert.Equal( t, []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, @@ -91,7 +85,7 @@ func TestMetricSyncCounter(t *testing.T) { meter.Register("CounterOnly") stuffCounter, err := meter.GetSyncCounter( "countStuff", - unit.Dimensionless, + "1", ) if err != nil { t.Error(err) @@ -101,16 +95,11 @@ func TestMetricSyncCounter(t *testing.T) { stuffCounter.Add(ctx, 12) stuffCounter.Add(ctx, 1) - dump, err := meter.Dump(ctx) + data, err := meter.Dump(ctx) if err != nil { t.Error(err) } - data, isMatricData := dump.(metricdata.ResourceMetrics) - if !isMatricData { - t.Error(err) - } - assert.Equal(t, 1, len(data.ScopeMetrics)) assert.Equal(t, "CounterOnly", data.ScopeMetrics[0].Scope.Name) assert.Equal(t, 1, len(data.ScopeMetrics[0].Metrics)) @@ -137,7 +126,7 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { stuffCounter, err := meter.GetSyncCounter( "countStuff", - 
unit.Dimensionless, + "1", ) if err != nil { t.Error(err) @@ -145,7 +134,7 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { workDuration, err := meter.GetSyncHistogram( "workDuration", - unit.Milliseconds, + "ms", ) if err != nil { t.Error(err) @@ -166,16 +155,11 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { stuffCounter.Add(ctx, 1) - dump, err := meter.Dump(ctx) + data, err := meter.Dump(ctx) if err != nil { t.Error(err) } - data, isMatricData := dump.(metricdata.ResourceMetrics) - if !isMatricData { - t.Error(err) - } - assert.Equal(t, 1, len(data.ScopeMetrics)) assert.Equal(t, "CounterAndHistogram", data.ScopeMetrics[0].Scope.Name) @@ -197,14 +181,14 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { assert.Equal(t, "workDuration", metrics[1].Name) histMetricData := metrics[1].Data - histData, isHistData := histMetricData.(metricdata.Histogram) + histData, isHistData := histMetricData.(metricdata.Histogram[int64]) if !isHistData { t.Error(err) } assert.Equal(t, 1, len(histData.DataPoints)) assert.Equal(t, uint64(3), histData.DataPoints[0].Count) - assert.Equal(t, 12.0, histData.DataPoints[0].Sum) // 2 + 4 + 6 + assert.Equal(t, int64(12), histData.DataPoints[0].Sum) // 2 + 4 + 6 assert.Equal( t, []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, diff --git a/net/api/client/client.go b/net/api/client/client.go index b6bfe5e386..2ea92bd14c 100644 --- a/net/api/client/client.go +++ b/net/api/client/client.go @@ -18,15 +18,22 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" + codec "github.com/planetscale/vtprotobuf/codec/grpc" "google.golang.org/grpc" + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" - pb "github.com/sourcenetwork/defradb/net/api/pb" + pb "github.com/sourcenetwork/defradb/net/pb" ) +func init() { + encoding.RegisterCodec(codec.Codec{}) +} + type Client struct { - c pb.ServiceClient + c pb.CollectionClient conn *grpc.ClientConn } @@ -38,7 +45,7 @@ func NewClient(target string, opts ...grpc.DialOption) (*Client, error) { } return &Client{ - c: pb.NewServiceClient(conn), + c: pb.NewCollectionClient(conn), conn: conn, }, nil } diff --git a/net/api/pb/Makefile b/net/api/pb/Makefile index e96e192c5a..62eef77354 100644 --- a/net/api/pb/Makefile +++ b/net/api/pb/Makefile @@ -4,9 +4,12 @@ GO = $(PB:.proto=.pb.go) all: $(GO) %.pb.go: %.proto - protoc -I=. -I=$(GOPATH)/src -I=$(GOPATH)/src/github.com/gogo/protobuf/protobuf --gogofaster_out=\ - plugins=grpc:\ - . $< + protoc \ + --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ + --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ + --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ + --go-vtproto_opt=features=marshal+unmarshal+size \ + $< clean: rm -f *.pb.go diff --git a/net/api/pb/api.pb.go b/net/api/pb/api.pb.go index e34f954bc9..ad48069b8f 100644 --- a/net/api/pb/api.pb.go +++ b/net/api/pb/api.pb.go @@ -1,3150 +1,1100 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.9 // source: api.proto package api_pb import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type SetReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` } -func (m *SetReplicatorRequest) Reset() { *m = SetReplicatorRequest{} } -func (m *SetReplicatorRequest) String() string { return proto.CompactTextString(m) } -func (*SetReplicatorRequest) ProtoMessage() {} -func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *SetReplicatorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SetReplicatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SetReplicatorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SetReplicatorRequest) Reset() { + *x = SetReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SetReplicatorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetReplicatorRequest.Merge(m, src) -} -func (m *SetReplicatorRequest) XXX_Size() int { - return m.Size() + +func (x *SetReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetReplicatorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetReplicatorRequest.DiscardUnknown(m) + +func (*SetReplicatorRequest) ProtoMessage() {} + +func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetReplicatorRequest proto.InternalMessageInfo +// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. 
+func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{0} +} -func (m *SetReplicatorRequest) GetCollections() []string { - if m != nil { - return m.Collections +func (x *SetReplicatorRequest) GetCollections() []string { + if x != nil { + return x.Collections } return nil } -func (m *SetReplicatorRequest) GetAddr() []byte { - if m != nil { - return m.Addr +func (x *SetReplicatorRequest) GetAddr() []byte { + if x != nil { + return x.Addr } return nil } type SetReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *SetReplicatorReply) Reset() { *m = SetReplicatorReply{} } -func (m *SetReplicatorReply) String() string { return proto.CompactTextString(m) } -func (*SetReplicatorReply) ProtoMessage() {} -func (*SetReplicatorReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *SetReplicatorReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SetReplicatorReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SetReplicatorReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SetReplicatorReply) Reset() { + *x = SetReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SetReplicatorReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetReplicatorReply.Merge(m, src) -} -func (m *SetReplicatorReply) XXX_Size() int { - return m.Size() + +func (x *SetReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetReplicatorReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetReplicatorReply.DiscardUnknown(m) + +func (*SetReplicatorReply) ProtoMessage() {} + +func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetReplicatorReply proto.InternalMessageInfo +// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. 
+func (*SetReplicatorReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{1} +} -func (m *SetReplicatorReply) GetPeerID() []byte { - if m != nil { - return m.PeerID +func (x *SetReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID } return nil } type DeleteReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *DeleteReplicatorRequest) Reset() { *m = DeleteReplicatorRequest{} } -func (m *DeleteReplicatorRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteReplicatorRequest) ProtoMessage() {} -func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *DeleteReplicatorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteReplicatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteReplicatorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteReplicatorRequest) Reset() { + *x = DeleteReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteReplicatorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteReplicatorRequest.Merge(m, src) -} -func (m *DeleteReplicatorRequest) XXX_Size() int { - return m.Size() + +func (x *DeleteReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteReplicatorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteReplicatorRequest.DiscardUnknown(m) + +func (*DeleteReplicatorRequest) ProtoMessage() {} + +func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteReplicatorRequest proto.InternalMessageInfo +// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{2} +} -func (m *DeleteReplicatorRequest) GetPeerID() []byte { - if m != nil { - return m.PeerID +func (x *DeleteReplicatorRequest) GetPeerID() []byte { + if x != nil { + return x.PeerID } return nil } type DeleteReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *DeleteReplicatorReply) Reset() { *m = DeleteReplicatorReply{} } -func (m *DeleteReplicatorReply) String() string { return proto.CompactTextString(m) } -func (*DeleteReplicatorReply) ProtoMessage() {} -func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{3} -} -func (m *DeleteReplicatorReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteReplicatorReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteReplicatorReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteReplicatorReply) Reset() { + *x = DeleteReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteReplicatorReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteReplicatorReply.Merge(m, src) -} -func (m *DeleteReplicatorReply) XXX_Size() int { - return m.Size() -} -func (m *DeleteReplicatorReply) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteReplicatorReply.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteReplicatorReply proto.InternalMessageInfo -func (m *DeleteReplicatorReply) GetPeerID() []byte { - if m != nil { - return m.PeerID - } - return nil +func (x *DeleteReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type GetAllReplicatorRequest struct { -} +func (*DeleteReplicatorReply) ProtoMessage() {} -func (m *GetAllReplicatorRequest) Reset() { *m = GetAllReplicatorRequest{} } -func (m *GetAllReplicatorRequest) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorRequest) ProtoMessage() {} -func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{4} -} -func (m *GetAllReplicatorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *GetAllReplicatorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorRequest.Merge(m, src) -} -func (m *GetAllReplicatorRequest) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorRequest.DiscardUnknown(m) + return mi.MessageOf(x) } -var 
xxx_messageInfo_GetAllReplicatorRequest proto.InternalMessageInfo - -type GetAllReplicatorReply struct { - Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` +// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. +func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{3} } -func (m *GetAllReplicatorReply) Reset() { *m = GetAllReplicatorReply{} } -func (m *GetAllReplicatorReply) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorReply) ProtoMessage() {} -func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5} -} -func (m *GetAllReplicatorReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID } -} -func (m *GetAllReplicatorReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorReply.Merge(m, src) -} -func (m *GetAllReplicatorReply) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorReply.DiscardUnknown(m) + return nil } -var xxx_messageInfo_GetAllReplicatorReply proto.InternalMessageInfo +type GetAllReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} -func (m *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { - if m != nil { - return m.Replicators +func (x *GetAllReplicatorRequest) Reset() { + *x = GetAllReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetAllReplicatorReply_Replicators struct { - Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` +func (x *GetAllReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetAllReplicatorReply_Replicators) Reset() { *m = GetAllReplicatorReply_Replicators{} } -func (m *GetAllReplicatorReply_Replicators) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} -func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5, 0} -} -func (m *GetAllReplicatorReply_Replicators) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorReply_Replicators) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorReply_Replicators.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetAllReplicatorRequest) ProtoMessage() {} + +func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetAllReplicatorReply_Replicators) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorReply_Replicators.Merge(m, src) -} -func (m *GetAllReplicatorReply_Replicators) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorReply_Replicators) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorReply_Replicators.DiscardUnknown(m) + +// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{4} } -var xxx_messageInfo_GetAllReplicatorReply_Replicators proto.InternalMessageInfo +type GetAllReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { - if m != nil { - return m.Info - } - return nil + Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` } -func (m *GetAllReplicatorReply_Replicators) GetSchemas() []string { - if m != nil { - return m.Schemas +func (x *GetAllReplicatorReply) Reset() { + *x = GetAllReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetAllReplicatorReply_Replicators_Info struct { - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` +func (x *GetAllReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetAllReplicatorReply_Replicators_Info) Reset() { - *m = GetAllReplicatorReply_Replicators_Info{} -} -func (m *GetAllReplicatorReply_Replicators_Info) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} -func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5, 0, 0} -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorReply_Replicators_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetAllReplicatorReply) ProtoMessage() {} + +func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorReply_Replicators_Info.Merge(m, src) -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorReply_Replicators_Info.DiscardUnknown(m) + return mi.MessageOf(x) } -var 
xxx_messageInfo_GetAllReplicatorReply_Replicators_Info proto.InternalMessageInfo - -func (m *GetAllReplicatorReply_Replicators_Info) GetId() []byte { - if m != nil { - return m.Id - } - return nil +// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{5} } -func (m *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { - if m != nil { - return m.Addrs +func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { + if x != nil { + return x.Replicators } return nil } type AddP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *AddP2PCollectionsRequest) Reset() { *m = AddP2PCollectionsRequest{} } -func (m *AddP2PCollectionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddP2PCollectionsRequest) ProtoMessage() {} -func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{6} -} -func (m *AddP2PCollectionsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddP2PCollectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddP2PCollectionsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *AddP2PCollectionsRequest) Reset() { + *x = AddP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *AddP2PCollectionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddP2PCollectionsRequest.Merge(m, src) -} -func (m *AddP2PCollectionsRequest) XXX_Size() int { - return m.Size() + +func (x *AddP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AddP2PCollectionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddP2PCollectionsRequest.DiscardUnknown(m) + +func (*AddP2PCollectionsRequest) ProtoMessage() {} + +func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_AddP2PCollectionsRequest proto.InternalMessageInfo +// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{6}
+}
-func (m *AddP2PCollectionsRequest) GetCollections() []string {
-	if m != nil {
-		return m.Collections
+func (x *AddP2PCollectionsRequest) GetCollections() []string {
+	if x != nil {
+		return x.Collections
 	}
 	return nil
 }
 type AddP2PCollectionsReply struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
 }
-func (m *AddP2PCollectionsReply) Reset() { *m = AddP2PCollectionsReply{} }
-func (m *AddP2PCollectionsReply) String() string { return proto.CompactTextString(m) }
-func (*AddP2PCollectionsReply) ProtoMessage() {}
-func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) {
-	return fileDescriptor_00212fb1f9d3bf1c, []int{7}
-}
-func (m *AddP2PCollectionsReply) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AddP2PCollectionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AddP2PCollectionsReply.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
+func (x *AddP2PCollectionsReply) Reset() {
+	*x = AddP2PCollectionsReply{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
 }
-func (m *AddP2PCollectionsReply) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AddP2PCollectionsReply.Merge(m, src)
-}
-func (m *AddP2PCollectionsReply) XXX_Size() int {
-	return m.Size()
-}
-func (m *AddP2PCollectionsReply) XXX_DiscardUnknown() {
-	xxx_messageInfo_AddP2PCollectionsReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddP2PCollectionsReply proto.InternalMessageInfo
-func (m *AddP2PCollectionsReply) GetErr() string {
-	if m != nil {
-		return m.Err
-	}
-	return ""
+func (x *AddP2PCollectionsReply) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-type RemoveP2PCollectionsRequest struct {
-	Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
-}
+func (*AddP2PCollectionsReply) ProtoMessage() {}
-func (m *RemoveP2PCollectionsRequest) Reset() { *m = RemoveP2PCollectionsRequest{} }
-func (m *RemoveP2PCollectionsRequest) String() string { return proto.CompactTextString(m) }
-func (*RemoveP2PCollectionsRequest) ProtoMessage() {}
-func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_00212fb1f9d3bf1c, []int{8}
-}
-func (m *RemoveP2PCollectionsRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RemoveP2PCollectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RemoveP2PCollectionsRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
+func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
 		}
-		return b[:n], nil
+		return ms
 	}
-}
-func (m *RemoveP2PCollectionsRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RemoveP2PCollectionsRequest.Merge(m, src)
-}
-func (m *RemoveP2PCollectionsRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *RemoveP2PCollectionsRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_RemoveP2PCollectionsRequest.DiscardUnknown(m)
+	return mi.MessageOf(x)
 }
-var xxx_messageInfo_RemoveP2PCollectionsRequest proto.InternalMessageInfo
+// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead.
+func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{7}
 }
-func (m *RemoveP2PCollectionsRequest) GetCollections() []string {
-	if m != nil {
-		return m.Collections
+func (x *AddP2PCollectionsReply) GetErr() string {
+	if x != nil {
+		return x.Err
 	}
-	return nil
+	return ""
 }
-type RemoveP2PCollectionsReply struct {
-	Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
+type RemoveP2PCollectionsRequest struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
 }
-func (m *RemoveP2PCollectionsReply) Reset() { *m = RemoveP2PCollectionsReply{} }
-func (m *RemoveP2PCollectionsReply) String() string { return proto.CompactTextString(m) }
-func (*RemoveP2PCollectionsReply) ProtoMessage() {}
-func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) {
-	return fileDescriptor_00212fb1f9d3bf1c, []int{9}
-}
-func (m *RemoveP2PCollectionsReply) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RemoveP2PCollectionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RemoveP2PCollectionsReply.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
+func (x *RemoveP2PCollectionsRequest) Reset() {
+	*x = RemoveP2PCollectionsRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
 }
-func (m *RemoveP2PCollectionsReply) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RemoveP2PCollectionsReply.Merge(m, src)
-}
-func (m *RemoveP2PCollectionsReply) XXX_Size() int {
-	return m.Size()
-}
-func (m *RemoveP2PCollectionsReply) XXX_DiscardUnknown() {
-	xxx_messageInfo_RemoveP2PCollectionsReply.DiscardUnknown(m)
+
+func (x *RemoveP2PCollectionsRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-var xxx_messageInfo_RemoveP2PCollectionsReply proto.InternalMessageInfo
+func (*RemoveP2PCollectionsRequest) ProtoMessage() {}
-func (m *RemoveP2PCollectionsReply) GetErr() string {
-	if m != nil {
-		return m.Err
+func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return ""
+	return mi.MessageOf(x)
 }
-type GetAllP2PCollectionsRequest struct {
+// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead.
+func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{8}
 }
-func (m *GetAllP2PCollectionsRequest) Reset() { *m = GetAllP2PCollectionsRequest{} }
-func (m *GetAllP2PCollectionsRequest) String() string { return proto.CompactTextString(m) }
-func (*GetAllP2PCollectionsRequest) ProtoMessage() {}
-func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_00212fb1f9d3bf1c, []int{10}
-}
-func (m *GetAllP2PCollectionsRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *GetAllP2PCollectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_GetAllP2PCollectionsRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
+func (x *RemoveP2PCollectionsRequest) GetCollections() []string {
+	if x != nil {
+		return x.Collections
 	}
-}
-func (m *GetAllP2PCollectionsRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetAllP2PCollectionsRequest.Merge(m, src)
-}
-func (m *GetAllP2PCollectionsRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *GetAllP2PCollectionsRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetAllP2PCollectionsRequest.DiscardUnknown(m)
+	return nil
 }
-var xxx_messageInfo_GetAllP2PCollectionsRequest proto.InternalMessageInfo
+type RemoveP2PCollectionsReply struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
-
-type GetAllP2PCollectionsReply struct {
-	Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
+	Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
 }
-func (m *GetAllP2PCollectionsReply) Reset() { *m = GetAllP2PCollectionsReply{} }
-func (m *GetAllP2PCollectionsReply) String() string { return proto.CompactTextString(m) }
-func (*GetAllP2PCollectionsReply) ProtoMessage() {}
-func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) {
-	return fileDescriptor_00212fb1f9d3bf1c, []int{11}
-}
-func (m *GetAllP2PCollectionsReply) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *GetAllP2PCollectionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_GetAllP2PCollectionsReply.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
+func (x *RemoveP2PCollectionsReply) Reset() {
+	*x = RemoveP2PCollectionsReply{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
 }
-func (m *GetAllP2PCollectionsReply) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetAllP2PCollectionsReply.Merge(m, src)
-}
-func (m *GetAllP2PCollectionsReply) XXX_Size() int {
-	return m.Size()
-}
-func (m *GetAllP2PCollectionsReply) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetAllP2PCollectionsReply.DiscardUnknown(m)
-}
-var xxx_messageInfo_GetAllP2PCollectionsReply proto.InternalMessageInfo
-
-func (m *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection {
-	if m != nil {
-		return m.Collections
-	}
-	return nil
+func (x *RemoveP2PCollectionsReply) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-type GetAllP2PCollectionsReply_Collection struct {
-	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-}
+func (*RemoveP2PCollectionsReply) ProtoMessage() {}
-func (m *GetAllP2PCollectionsReply_Collection) Reset() { *m = GetAllP2PCollectionsReply_Collection{} }
-func (m *GetAllP2PCollectionsReply_Collection) String() string { return proto.CompactTextString(m) }
-func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {}
-func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_00212fb1f9d3bf1c, []int{11, 0}
-}
-func (m *GetAllP2PCollectionsReply_Collection) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *GetAllP2PCollectionsReply_Collection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_GetAllP2PCollectionsReply_Collection.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
+func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
 		}
-		return b[:n], nil
+		return ms
 	}
+	return mi.MessageOf(x)
 }
-func (m *GetAllP2PCollectionsReply_Collection) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GetAllP2PCollectionsReply_Collection.Merge(m, src)
-}
-func (m *GetAllP2PCollectionsReply_Collection) XXX_Size() int {
-	return m.Size()
-}
-func (m *GetAllP2PCollectionsReply_Collection) XXX_DiscardUnknown() {
-	xxx_messageInfo_GetAllP2PCollectionsReply_Collection.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetAllP2PCollectionsReply_Collection proto.InternalMessageInfo
-func (m *GetAllP2PCollectionsReply_Collection) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
+// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead.
+func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{9}
 }
-func (m *GetAllP2PCollectionsReply_Collection) GetName() string {
-	if m != nil {
-		return m.Name
+func (x *RemoveP2PCollectionsReply) GetErr() string {
+	if x != nil {
+		return x.Err
 	}
 	return ""
 }
-func init() {
-	proto.RegisterType((*SetReplicatorRequest)(nil), "api.pb.SetReplicatorRequest")
-	proto.RegisterType((*SetReplicatorReply)(nil), "api.pb.SetReplicatorReply")
-	proto.RegisterType((*DeleteReplicatorRequest)(nil), "api.pb.DeleteReplicatorRequest")
-	proto.RegisterType((*DeleteReplicatorReply)(nil), "api.pb.DeleteReplicatorReply")
-	proto.RegisterType((*GetAllReplicatorRequest)(nil), "api.pb.GetAllReplicatorRequest")
-	proto.RegisterType((*GetAllReplicatorReply)(nil), "api.pb.GetAllReplicatorReply")
-	proto.RegisterType((*GetAllReplicatorReply_Replicators)(nil), "api.pb.GetAllReplicatorReply.Replicators")
-	proto.RegisterType((*GetAllReplicatorReply_Replicators_Info)(nil), "api.pb.GetAllReplicatorReply.Replicators.Info")
-	proto.RegisterType((*AddP2PCollectionsRequest)(nil), "api.pb.AddP2PCollectionsRequest")
-	proto.RegisterType((*AddP2PCollectionsReply)(nil), "api.pb.AddP2PCollectionsReply")
-	proto.RegisterType((*RemoveP2PCollectionsRequest)(nil), "api.pb.RemoveP2PCollectionsRequest")
-	proto.RegisterType((*RemoveP2PCollectionsReply)(nil), "api.pb.RemoveP2PCollectionsReply")
-	proto.RegisterType((*GetAllP2PCollectionsRequest)(nil), "api.pb.GetAllP2PCollectionsRequest")
-	proto.RegisterType((*GetAllP2PCollectionsReply)(nil), "api.pb.GetAllP2PCollectionsReply")
-	proto.RegisterType((*GetAllP2PCollectionsReply_Collection)(nil), "api.pb.GetAllP2PCollectionsReply.Collection")
-}
-
-func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) }
-
-var fileDescriptor_00212fb1f9d3bf1c = []byte{
-	// 524 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0x94, 0x40,
-	0x14, 0x06, 0x8a, 0x6d, 0x78, 0xa8, 0x69, 0x5f, 0xb6, 0x2d, 0x4b, 0x2d, 0xae, 0xe3, 0xa5, 0x9a,
-	0x15, 0x15, 0xaf, 0x26, 0xa6, 0xb5, 0x89, 0x69, 0x6a, 0x4c, 0x43, 0x35, 0x5e, 0xa5, 0xf0, 0x1a,
-	0x49, 0xd8, 0x05, 0x01, 0x9b, 0xf4, 0xe6, 0x4f, 0xf0, 0xea, 0xc1, 0xc4, 0x9f, 0xe3, 0xb1, 0x47,
-	0x8f, 0x66, 0xf7, 0x8f, 0x18, 0x86, 0xa5, 0xec, 0x2e, 0x03, 0x69, 0xbc, 0xcd, 0xcc, 0x7b, 0xdf,
-	0xf7, 0x0d, 0xf3, 0x7d, 0x0f, 0xd0, 0xbc, 0x24, 0xb4, 0x93, 0x34, 0xce, 0x63, 0x5c, 0xe5, 0xcb,
-	0x33, 0xf6, 0x16, 0x7a, 0xa7, 0x94, 0xbb, 0x94, 0x44, 0xa1, 0xef, 0xe5, 0x71, 0xea, 0xd2, 0x97,
-	0xaf, 0x94, 0xe5, 0x38, 0x00, 0xdd, 0x8f, 0xa3, 0x88, 0xfc, 0x3c, 0x8c, 0xc7, 0x99, 0x21, 0x0f,
-	0x56, 0xf6, 0x34, 0x77, 0xfe, 0x08, 0x11, 0x54, 0x2f, 0x08, 0x52, 0x43, 0x19, 0xc8, 0x7b, 0xb7,
-	0x5d, 0xbe, 0x66, 0x43, 0xc0, 0x25, 0xb6, 0x24, 0xba, 0xc4, 0x2d, 0x58, 0x4d, 0x88, 0xd2, 0xa3,
-	0x43, 0x43, 0xe6, 0xbd, 0xb3, 0x1d, 0x7b, 0x0e, 0xdb, 0x87, 0x14, 0x51, 0x4e, 0x4d, 0xf9, 0x36,
-	0xc8, 0x53, 0xd8, 0x6c, 0x42, 0xba, 0x34, 0xfa, 0xb0, 0xfd, 0x86, 0xf2, 0xfd, 0x28, 0x6a, 0x68,
-	0xb0, 0x6f, 0x0a, 0x6c, 0x36, 0x6b, 0x05, 0xd9, 0x31, 0xe8, 0xe9, 0xf5, 0x51, 0xf9, 0xf1, 0xba,
-	0xf3, 0xc8, 0x2e, 0x9f, 0xcc, 0x16, 0x62, 0xec, 0x7a, 0x9f, 0xb9, 0xf3, 0x68, 0xf3, 0x87, 0x0c,
-	0xfa, 0x5c, 0x11, 0x0f, 0x40, 0x0d, 0xc7, 0xe7, 0x31, 0xbf, 0xa7, 0xee, 0xd8, 0x37, 0x66, 0xb5,
-	0x8f, 0xc6, 0xe7, 0xb1, 0xcb, 0xb1, 0x68, 0xc0, 0x5a, 0xe6, 0x7f, 0xa6, 0x91, 0x97, 0x19, 0x0a,
-	0x77, 0xa6, 0xda, 0x9a, 0x43, 0x50, 0x8b, 0x3e, 0xbc, 0x0b, 0x4a, 0x18, 0xcc, 0xde, 0x42, 0x09,
-	0x03, 0xec, 0xc1, 0xad, 0xc2, 0xa1, 0x6c, 0x66, 0x57, 0xb9, 0x61, 0x2f, 0xc1, 0xd8, 0x0f, 0x82,
-	0x13, 0xe7, 0xe4, 0x75, 0x6d, 0xec, 0x8d, 0x13, 0xc0, 0x1e, 0xc3, 0x96, 0x00, 0x5d, 0x3c, 0xe0,
-	0x3a, 0xac, 0x50, 0x9a, 0x72, 0x79, 0xcd, 0x2d, 0x96, 0xec, 0x15, 0xec, 0xb8, 0x34, 0x8a, 0x2f,
-	0xe8, 0x7f, 0xc5, 0x9e, 0x40, 0x5f, 0x4c, 0x20, 0xd6, 0xdb, 0x85, 0x9d, 0xf2, 0x45, 0x85, 0x7a,
-	0xec, 0xa7, 0x0c, 0x7d, 0x71, 0xbd, 0xa0, 0x7b, 0xd7, 0xbc, 0x8d, 0xee, 0x0c, 0x17, 0x9d, 0x12,
-	0xe0, 0xec, 0xfa, 0x60, 0xe1, 0xee, 0xe6, 0x33, 0x80, 0xba, 0x34, 0x67, 0x8d, 0xc6, 0xad, 0x41,
-	0x50, 0xc7, 0xde, 0x88, 0xb8, 0x33, 0x9a, 0xcb, 0xd7, 0xce, 0x2f, 0x15, 0xd6, 0x4e, 0x29, 0xbd,
-	0x08, 0x7d, 0xc2, 0x63, 0xb8, 0xb3, 0x30, 0x54, 0x78, 0xaf, 0xba, 0x89, 0x68, 0x72, 0x4d, 0xb3,
-	0xa5, 0x9a, 0x44, 0x97, 0x4c, 0xc2, 0xf7, 0xb0, 0xbe, 0x3c, 0x40, 0x78, 0xbf, 0x42, 0xb4, 0x4c,
-	0xa3, 0xb9, 0xdb, 0xde, 0x50, 0xb2, 0x7e, 0x80, 0x8d, 0xe5, 0xfc, 0x66, 0x35, 0x6d, 0xcb, 0x00,
-	0xd6, 0xb4, 0xc2, 0xec, 0x33, 0x09, 0x3f, 0xc2, 0x46, 0x23, 0x60, 0x38, 0xa8, 0x50, 0x6d, 0xc9,
-	0x35, 0xad, 0x8e, 0x8e, 0x92, 0xf8, 0x13, 0xf4, 0x44, 0x61, 0xc2, 0x87, 0x15, 0xb2, 0x23, 0xab,
-	0xe6, 0x83, 0xee, 0xa6, 0x6b, 0x05, 0x51, 0x4e, 0x6a, 0x85, 0x8e, 0x74, 0xd6, 0x0a, 0xad, 0x51,
-	0x63, 0xd2, 0x81, 0xf1, 0x7b, 0x62, 0xc9, 0x57, 0x13, 0x4b, 0xfe, 0x3b, 0xb1, 0xe4, 0xef, 0x53,
-	0x4b, 0xba, 0x9a, 0x5a, 0xd2, 0x9f, 0xa9, 0x25, 0x9d, 0xad, 0xf2, 0x5f, 0xfc, 0x8b, 0x7f, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0xb7, 0xed, 0x74, 0x34, 0xef, 0x05, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// ServiceClient is the client API for Service service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ServiceClient interface {
-	// SetReplicator for this peer
-	SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error)
-	// DeleteReplicator for this peer
-	DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error)
-	// DeleteReplicator for this peer
-	GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error)
-	AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error)
-	RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error)
-	GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error)
-}
-
-type serviceClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewServiceClient(cc *grpc.ClientConn) ServiceClient {
-	return &serviceClient{cc}
-}
-
-func (c *serviceClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) {
-	out := new(SetReplicatorReply)
-	err := c.cc.Invoke(ctx, "/api.pb.Service/SetReplicator", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
+type GetAllP2PCollectionsRequest struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
 }
-func (c *serviceClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) {
-	out := new(DeleteReplicatorReply)
-	err := c.cc.Invoke(ctx, "/api.pb.Service/DeleteReplicator", in, out, opts...)
-	if err != nil {
-		return nil, err
+func (x *GetAllP2PCollectionsRequest) Reset() {
+	*x = GetAllP2PCollectionsRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
-	return out, nil
 }
-func (c *serviceClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) {
-	out := new(GetAllReplicatorReply)
-	err := c.cc.Invoke(ctx, "/api.pb.Service/GetAllReplicators", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
+func (x *GetAllP2PCollectionsRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (c *serviceClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) {
-	out := new(AddP2PCollectionsReply)
-	err := c.cc.Invoke(ctx, "/api.pb.Service/AddP2PCollections", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
+func (*GetAllP2PCollectionsRequest) ProtoMessage() {}
-func (c *serviceClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) {
-	out := new(RemoveP2PCollectionsReply)
-	err := c.cc.Invoke(ctx, "/api.pb.Service/RemoveP2PCollections", in, out, opts...)
-	if err != nil {
-		return nil, err
+func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return out, nil
+	return mi.MessageOf(x)
 }
-func (c *serviceClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) {
-	out := new(GetAllP2PCollectionsReply)
-	err := c.cc.Invoke(ctx, "/api.pb.Service/GetAllP2PCollections", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
+// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead.
+func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{10}
 }
-// ServiceServer is the server API for Service service.
-type ServiceServer interface {
-	// SetReplicator for this peer
-	SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error)
-	// DeleteReplicator for this peer
-	DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error)
-	// DeleteReplicator for this peer
-	GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error)
-	AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error)
-	RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error)
-	GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error)
-}
+type GetAllP2PCollectionsReply struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
-// UnimplementedServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedServiceServer struct {
+	Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"`
 }
-func (*UnimplementedServiceServer) SetReplicator(ctx context.Context, req *SetReplicatorRequest) (*SetReplicatorReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented")
-}
-func (*UnimplementedServiceServer) DeleteReplicator(ctx context.Context, req *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented")
-}
-func (*UnimplementedServiceServer) GetAllReplicators(ctx context.Context, req *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented")
-}
-func (*UnimplementedServiceServer) AddP2PCollections(ctx context.Context, req *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented")
-}
-func (*UnimplementedServiceServer) RemoveP2PCollections(ctx context.Context, req *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented")
-}
-func (*UnimplementedServiceServer) GetAllP2PCollections(ctx context.Context, req *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented")
+func (x *GetAllP2PCollectionsReply) Reset() {
+	*x = GetAllP2PCollectionsReply{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
-func RegisterServiceServer(s *grpc.Server, srv ServiceServer) {
-	s.RegisterService(&_Service_serviceDesc, srv)
+func (x *GetAllP2PCollectionsReply) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func _Service_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(SetReplicatorRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).SetReplicator(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/SetReplicator",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).SetReplicator(ctx, req.(*SetReplicatorRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
+func (*GetAllP2PCollectionsReply) ProtoMessage() {}
-func _Service_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteReplicatorRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).DeleteReplicator(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/DeleteReplicator",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest))
+func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return interceptor(ctx, in, info, handler)
+	return mi.MessageOf(x)
 }
-func _Service_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(GetAllReplicatorRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).GetAllReplicators(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/GetAllReplicators",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead.
+func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{11}
 }
-func _Service_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AddP2PCollectionsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
+func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection {
+	if x != nil {
+		return x.Collections
 	}
-	if interceptor == nil {
-		return srv.(ServiceServer).AddP2PCollections(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/AddP2PCollections",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+	return nil
 }
-func _Service_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(RemoveP2PCollectionsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).RemoveP2PCollections(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/RemoveP2PCollections",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+type GetAllReplicatorReply_Replicators struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
+	Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"`
 }
-func _Service_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(GetAllP2PCollectionsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
+func (x *GetAllReplicatorReply_Replicators) Reset() {
+	*x = GetAllReplicatorReply_Replicators{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
-	if interceptor == nil {
-		return srv.(ServiceServer).GetAllP2PCollections(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/GetAllP2PCollections",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
 }
-var _Service_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "api.pb.Service",
-	HandlerType: (*ServiceServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "SetReplicator",
-			Handler: _Service_SetReplicator_Handler,
-		},
-		{
-			MethodName: "DeleteReplicator",
-			Handler: _Service_DeleteReplicator_Handler,
-		},
-		{
-			MethodName: "GetAllReplicators",
-			Handler: _Service_GetAllReplicators_Handler,
-		},
-		{
-			MethodName: "AddP2PCollections",
-			Handler: _Service_AddP2PCollections_Handler,
-		},
-		{
-			MethodName: "RemoveP2PCollections",
-			Handler: _Service_RemoveP2PCollections_Handler,
-		},
-		{
-			MethodName: "GetAllP2PCollections",
-			Handler: _Service_GetAllP2PCollections_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{},
-	Metadata: "api.proto",
+func (x *GetAllReplicatorReply_Replicators) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *SetReplicatorRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (*GetAllReplicatorReply_Replicators) ProtoMessage() {}
+
+func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return dAtA[:n], nil
+	return mi.MessageOf(x)
 }
-func (m *SetReplicatorRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead.
+func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{5, 0}
 }
-func (m *SetReplicatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Addr) > 0 {
-		i -= len(m.Addr)
-		copy(dAtA[i:], m.Addr)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Addr)))
-		i--
-		dAtA[i] = 0x12
+func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info {
+	if x != nil {
+		return x.Info
 	}
-	if len(m.Collections) > 0 {
-		for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Collections[iNdEx])
-			copy(dAtA[i:], m.Collections[iNdEx])
-			i = encodeVarintApi(dAtA, i, uint64(len(m.Collections[iNdEx])))
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
+	return nil
 }
-func (m *SetReplicatorReply) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string {
+	if x != nil {
+		return x.Schemas
 	}
-	return dAtA[:n], nil
+	return nil
 }
-func (m *SetReplicatorReply) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
+type GetAllReplicatorReply_Replicators_Info struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
-func (m *SetReplicatorReply) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.PeerID) > 0 {
-		i -= len(m.PeerID)
-		copy(dAtA[i:], m.PeerID)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.PeerID)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
+	Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"`
 }
-func (m *DeleteReplicatorRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (x *GetAllReplicatorReply_Replicators_Info) Reset() {
+	*x = GetAllReplicatorReply_Replicators_Info{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
-	return dAtA[:n], nil
 }
-func (m *DeleteReplicatorRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
+func (x *GetAllReplicatorReply_Replicators_Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *DeleteReplicatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.PeerID) > 0 {
-		i -= len(m.PeerID)
-		copy(dAtA[i:], m.PeerID)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.PeerID)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
+func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {}
-func (m *DeleteReplicatorReply) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return dAtA[:n], nil
+	return mi.MessageOf(x)
 }
-func (m *DeleteReplicatorReply) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead.
+func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{5, 0, 0}
 }
-func (m *DeleteReplicatorReply) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.PeerID) > 0 {
-		i -= len(m.PeerID)
-		copy(dAtA[i:], m.PeerID)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.PeerID)))
-		i--
-		dAtA[i] = 0xa
+func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte {
+	if x != nil {
+		return x.Id
 	}
-	return len(dAtA) - i, nil
+	return nil
 }
-func (m *GetAllReplicatorRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte {
+	if x != nil {
+		return x.Addrs
	}
-	return dAtA[:n], nil
+	return nil
 }
-func (m *GetAllReplicatorRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
+type GetAllP2PCollectionsReply_Collection struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
-func (m *GetAllReplicatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	return len(dAtA) - i, nil
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
 }
-func (m *GetAllReplicatorReply) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *GetAllReplicatorReply) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GetAllReplicatorReply) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Replicators) > 0 {
-		for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Replicators[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintApi(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0xa
-		}
+func (x *GetAllP2PCollectionsReply_Collection) Reset() {
+	*x = GetAllP2PCollectionsReply_Collection{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
-	return len(dAtA) - i, nil
 }
-func (m *GetAllReplicatorReply_Replicators) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *GetAllReplicatorReply_Replicators) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Schemas) > 0 {
-		for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Schemas[iNdEx])
-			copy(dAtA[i:], m.Schemas[iNdEx])
-			i = encodeVarintApi(dAtA, i, uint64(len(m.Schemas[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Info != nil {
-		{
-			size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintApi(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
+func (x *GetAllP2PCollectionsReply_Collection) String() string {
+	return protoimpl.X.MessageStringOf(x)
 }
-func (m *GetAllReplicatorReply_Replicators_Info) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {}
+
+func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return dAtA[:n], nil
+	return mi.MessageOf(x)
 }
-func (m *GetAllReplicatorReply_Replicators_Info) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead.
+func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{11, 0}
 }
-func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Addrs) > 0 {
-		i -= len(m.Addrs)
-		copy(dAtA[i:], m.Addrs)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Addrs)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Id) > 0 {
-		i -= len(m.Id)
-		copy(dAtA[i:], m.Id)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Id)))
-		i--
-		dAtA[i] = 0xa
+func (x *GetAllP2PCollectionsReply_Collection) GetId() string {
+	if x != nil {
+		return x.Id
 	}
-	return len(dAtA) - i, nil
+	return ""
 }
-func (m *AddP2PCollectionsRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AddP2PCollectionsRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AddP2PCollectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Collections[iNdEx])
-			copy(dAtA[i:], m.Collections[iNdEx])
-			i = encodeVarintApi(dAtA, i, uint64(len(m.Collections[iNdEx])))
-			i--
-			dAtA[i] = 0xa
-		}
+func (x *GetAllP2PCollectionsReply_Collection) GetName() string {
+	if x != nil {
+		return x.Name
 	}
-	return len(dAtA) - i, nil
+	return ""
 }
-func (m *AddP2PCollectionsReply) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AddP2PCollectionsReply) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AddP2PCollectionsReply) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Err) > 0 {
-		i -= len(m.Err)
-		copy(dAtA[i:], m.Err)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Err)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RemoveP2PCollectionsRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RemoveP2PCollectionsRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RemoveP2PCollectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Collections[iNdEx])
-			copy(dAtA[i:], m.Collections[iNdEx])
-			i = encodeVarintApi(dAtA, i, uint64(len(m.Collections[iNdEx])))
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RemoveP2PCollectionsReply) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RemoveP2PCollectionsReply) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RemoveP2PCollectionsReply) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Err) > 0 {
-		i -= len(m.Err)
-		copy(dAtA[i:], m.Err)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Err)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *GetAllP2PCollectionsRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *GetAllP2PCollectionsRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GetAllP2PCollectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	return len(dAtA) - i, nil
-}
-
-func (m *GetAllP2PCollectionsReply) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *GetAllP2PCollectionsReply) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GetAllP2PCollectionsReply) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Collections[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintApi(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *GetAllP2PCollectionsReply_Collection) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *GetAllP2PCollectionsReply_Collection) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Id) > 0 {
-		i -= len(m.Id)
-		copy(dAtA[i:], m.Id)
-		i = encodeVarintApi(dAtA, i, uint64(len(m.Id)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintApi(dAtA []byte, offset int, v uint64) int {
-	offset -= sovApi(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *SetReplicatorRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for _, s := range m.Collections {
-			l = len(s)
-			n += 1 + l + sovApi(uint64(l))
-		}
-	}
-	l = len(m.Addr)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *SetReplicatorReply) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.PeerID)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *DeleteReplicatorRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.PeerID)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *DeleteReplicatorReply) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.PeerID)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *GetAllReplicatorRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	return n
-}
-
-func (m *GetAllReplicatorReply) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Replicators) > 0 {
-		for _, e := range m.Replicators {
-			l = e.Size()
-			n += 1 + l + sovApi(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *GetAllReplicatorReply_Replicators) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Info != nil {
-		l = m.Info.Size()
-		n += 1 + l + sovApi(uint64(l))
-	}
-	if len(m.Schemas) > 0 {
-		for _, s := range m.Schemas {
-			l = len(s)
-			n += 1 + l + sovApi(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *GetAllReplicatorReply_Replicators_Info) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Id)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	l = len(m.Addrs)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *AddP2PCollectionsRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for _, s := range m.Collections {
-			l = len(s)
-			n += 1 + l + sovApi(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *AddP2PCollectionsReply) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Err)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *RemoveP2PCollectionsRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for _, s := range m.Collections {
-			l = len(s)
-			n += 1 + l + sovApi(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *RemoveP2PCollectionsReply) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Err)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func (m *GetAllP2PCollectionsRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	return n
-}
-
-func (m *GetAllP2PCollectionsReply) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Collections) > 0 {
-		for _, e := range m.Collections {
-			l = e.Size()
-			n += 1 + l + sovApi(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *GetAllP2PCollectionsReply_Collection) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Id)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovApi(uint64(l))
-	}
-	return n
-}
-
-func sovApi(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozApi(x uint64) (n int) {
-	return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *SetReplicatorRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...)
-			if m.Addr == nil {
-				m.Addr = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SetReplicatorReply) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...)
-			if m.PeerID == nil {
-				m.PeerID = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DeleteReplicatorRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...)
-			if m.PeerID == nil {
-				m.PeerID = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DeleteReplicatorReply) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...)
-			if m.PeerID == nil {
-				m.PeerID = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *GetAllReplicatorRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *GetAllReplicatorReply) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{})
-			if err := m.Replicators[len(m.Replicators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *GetAllReplicatorReply_Replicators) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Replicators: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Replicators: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Info == nil {
-				m.Info = &GetAllReplicatorReply_Replicators_Info{}
-			}
-			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *GetAllReplicatorReply_Replicators_Info) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Info: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
-			if m.Id == nil {
-				m.Id = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...)
-			if m.Addrs == nil {
-				m.Addrs = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AddP2PCollectionsRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowApi
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthApi
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthApi
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipApi(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthApi
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AddP2PCollectionsReply) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowApi
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType !=
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) - if err := m.Collections[len(m.Collections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply_Collection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Collection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Collection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - 
iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var File_api_proto protoreflect.FileDescriptor + +var file_api_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61, 0x70, 0x69, + 0x2e, 0x70, 0x62, 0x22, 0x4c, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, + 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, + 0x31, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, + 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, + 0x49, 0x44, 0x22, 0x2f, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, + 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 
0x0a, 0x05, 0x61, + 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, + 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, + 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, + 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, + 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xa0, 0x04, 0x0a, 0x07, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, + 0x62, 
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1f, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x41, + 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, + 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x47, + 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, + 0x08, 0x2f, 0x3b, 0x61, 0x70, 0x69, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") + file_api_proto_rawDescOnce sync.Once + file_api_proto_rawDescData = file_api_proto_rawDesc ) + +func file_api_proto_rawDescGZIP() []byte { + file_api_proto_rawDescOnce.Do(func() { + file_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_proto_rawDescData) + }) + return file_api_proto_rawDescData +} + +var file_api_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_api_proto_goTypes = []interface{}{ + (*SetReplicatorRequest)(nil), // 0: api.pb.SetReplicatorRequest + (*SetReplicatorReply)(nil), // 1: api.pb.SetReplicatorReply + (*DeleteReplicatorRequest)(nil), // 2: api.pb.DeleteReplicatorRequest + (*DeleteReplicatorReply)(nil), // 3: api.pb.DeleteReplicatorReply + (*GetAllReplicatorRequest)(nil), // 4: api.pb.GetAllReplicatorRequest + 
(*GetAllReplicatorReply)(nil), // 5: api.pb.GetAllReplicatorReply + (*AddP2PCollectionsRequest)(nil), // 6: api.pb.AddP2PCollectionsRequest + (*AddP2PCollectionsReply)(nil), // 7: api.pb.AddP2PCollectionsReply + (*RemoveP2PCollectionsRequest)(nil), // 8: api.pb.RemoveP2PCollectionsRequest + (*RemoveP2PCollectionsReply)(nil), // 9: api.pb.RemoveP2PCollectionsReply + (*GetAllP2PCollectionsRequest)(nil), // 10: api.pb.GetAllP2PCollectionsRequest + (*GetAllP2PCollectionsReply)(nil), // 11: api.pb.GetAllP2PCollectionsReply + (*GetAllReplicatorReply_Replicators)(nil), // 12: api.pb.GetAllReplicatorReply.Replicators + (*GetAllReplicatorReply_Replicators_Info)(nil), // 13: api.pb.GetAllReplicatorReply.Replicators.Info + (*GetAllP2PCollectionsReply_Collection)(nil), // 14: api.pb.GetAllP2PCollectionsReply.Collection +} +var file_api_proto_depIdxs = []int32{ + 12, // 0: api.pb.GetAllReplicatorReply.replicators:type_name -> api.pb.GetAllReplicatorReply.Replicators + 14, // 1: api.pb.GetAllP2PCollectionsReply.collections:type_name -> api.pb.GetAllP2PCollectionsReply.Collection + 13, // 2: api.pb.GetAllReplicatorReply.Replicators.info:type_name -> api.pb.GetAllReplicatorReply.Replicators.Info + 0, // 3: api.pb.Service.SetReplicator:input_type -> api.pb.SetReplicatorRequest + 2, // 4: api.pb.Service.DeleteReplicator:input_type -> api.pb.DeleteReplicatorRequest + 4, // 5: api.pb.Service.GetAllReplicators:input_type -> api.pb.GetAllReplicatorRequest + 6, // 6: api.pb.Service.AddP2PCollections:input_type -> api.pb.AddP2PCollectionsRequest + 8, // 7: api.pb.Service.RemoveP2PCollections:input_type -> api.pb.RemoveP2PCollectionsRequest + 10, // 8: api.pb.Service.GetAllP2PCollections:input_type -> api.pb.GetAllP2PCollectionsRequest + 1, // 9: api.pb.Service.SetReplicator:output_type -> api.pb.SetReplicatorReply + 3, // 10: api.pb.Service.DeleteReplicator:output_type -> api.pb.DeleteReplicatorReply + 5, // 11: api.pb.Service.GetAllReplicators:output_type -> api.pb.GetAllReplicatorReply + 7, // 12: api.pb.Service.AddP2PCollections:output_type -> api.pb.AddP2PCollectionsReply + 9, // 13: api.pb.Service.RemoveP2PCollections:output_type -> api.pb.RemoveP2PCollectionsReply + 11, // 14: api.pb.Service.GetAllP2PCollections:output_type -> api.pb.GetAllP2PCollectionsReply + 9, // [9:15] is the sub-list for method output_type + 3, // [3:9] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_api_proto_init() } +func file_api_proto_init() { + if File_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*DeleteReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply_Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_proto_goTypes, + DependencyIndexes: file_api_proto_depIdxs, + MessageInfos: file_api_proto_msgTypes, + }.Build() + File_api_proto = out.File + file_api_proto_rawDesc = nil + file_api_proto_goTypes = nil + file_api_proto_depIdxs = nil +} diff 
--git a/net/api/pb/api.proto b/net/api/pb/api.proto index df86e31931..367997c7af 100644 --- a/net/api/pb/api.proto +++ b/net/api/pb/api.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package api.pb; +option go_package = "/;api_pb"; + message SetReplicatorRequest { repeated string collections = 1; bytes addr = 2; diff --git a/net/api/pb/api_grpc.pb.go b/net/api/pb/api_grpc.pb.go new file mode 100644 index 0000000000..5d1bc204d3 --- /dev/null +++ b/net/api/pb/api_grpc.pb.go @@ -0,0 +1,300 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.9 +// source: api.proto + +package api_pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Service_SetReplicator_FullMethodName = "/api.pb.Service/SetReplicator" + Service_DeleteReplicator_FullMethodName = "/api.pb.Service/DeleteReplicator" + Service_GetAllReplicators_FullMethodName = "/api.pb.Service/GetAllReplicators" + Service_AddP2PCollections_FullMethodName = "/api.pb.Service/AddP2PCollections" + Service_RemoveP2PCollections_FullMethodName = "/api.pb.Service/RemoveP2PCollections" + Service_GetAllP2PCollections_FullMethodName = "/api.pb.Service/GetAllP2PCollections" +) + +// ServiceClient is the client API for Service service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServiceClient interface { + // SetReplicator for this peer + SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) + // DeleteReplicator for this peer + DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) + // GetAllReplicators for this peer + GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) + AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) + RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) + GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) +} + +type serviceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient { + return &serviceClient{cc} +} + +func (c *serviceClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { + out := new(SetReplicatorReply) + err := c.cc.Invoke(ctx, Service_SetReplicator_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { + out := new(DeleteReplicatorReply) + err := c.cc.Invoke(ctx, Service_DeleteReplicator_FullMethodName, in, out, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { + out := new(GetAllReplicatorReply) + err := c.cc.Invoke(ctx, Service_GetAllReplicators_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { + out := new(AddP2PCollectionsReply) + err := c.cc.Invoke(ctx, Service_AddP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { + out := new(RemoveP2PCollectionsReply) + err := c.cc.Invoke(ctx, Service_RemoveP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { + out := new(GetAllP2PCollectionsReply) + err := c.cc.Invoke(ctx, Service_GetAllP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceServer is the server API for Service service. +// All implementations must embed UnimplementedServiceServer +// for forward compatibility +type ServiceServer interface { + // SetReplicator for this peer + SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) + // DeleteReplicator for this peer + DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) + // GetAllReplicators for this peer + GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) + AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) + RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) + GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) + mustEmbedUnimplementedServiceServer() +} + +// UnimplementedServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedServiceServer struct { +} + +func (UnimplementedServiceServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") +} +func (UnimplementedServiceServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") +} +func (UnimplementedServiceServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") +} +func (UnimplementedServiceServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") +} +func (UnimplementedServiceServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") +} +func (UnimplementedServiceServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") +} +func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {} + +// UnsafeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServiceServer will +// result in compilation errors. +type UnsafeServiceServer interface { + mustEmbedUnimplementedServiceServer() +} + +func RegisterServiceServer(s grpc.ServiceRegistrar, srv ServiceServer) { + s.RegisterService(&Service_ServiceDesc, srv) +} + +func _Service_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).SetReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_SetReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).DeleteReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_DeleteReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ServiceServer).GetAllReplicators(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetAllReplicators_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).AddP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_AddP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).RemoveP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_RemoveP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetAllP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetAllP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Service_ServiceDesc is the grpc.ServiceDesc for Service service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Service_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "api.pb.Service", + HandlerType: (*ServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetReplicator", + Handler: _Service_SetReplicator_Handler, + }, + { + MethodName: "DeleteReplicator", + Handler: _Service_DeleteReplicator_Handler, + }, + { + MethodName: "GetAllReplicators", + Handler: _Service_GetAllReplicators_Handler, + }, + { + MethodName: "AddP2PCollections", + Handler: _Service_AddP2PCollections_Handler, + }, + { + MethodName: "RemoveP2PCollections", + Handler: _Service_RemoveP2PCollections_Handler, + }, + { + MethodName: "GetAllP2PCollections", + Handler: _Service_GetAllP2PCollections_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} diff --git a/net/api/pb/api_vtproto.pb.go b/net/api/pb/api_vtproto.pb.go new file mode 100644 index 0000000000..e4ddfb9bcb --- /dev/null +++ b/net/api/pb/api_vtproto.pb.go @@ -0,0 +1,2316 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: api.proto + +package api_pb + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addr) > 0 { + i -= len(m.Addr) + copy(dAtA[i:], m.Addr) + i = encodeVarint(dAtA, i, uint64(len(m.Addr))) + i-- + dAtA[i] = 0x12 + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addrs) > 0 { + i -= len(m.Addrs) + copy(dAtA[i:], m.Addrs) + i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Schemas) > 0 { + for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Schemas[iNdEx]) + copy(dAtA[i:], m.Schemas[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Info != nil { + size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Replicators) > 0 { + for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m 
*GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SetReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Addrs) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Schemas) > 0 { + for _, s := range m.Schemas { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + 
return n +} + +func (m *GetAllReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Replicators) > 0 { + for _, e := range m.Replicators { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, e := range m.Collections { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) + if m.Addr == nil { + m.Addr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Id == nil { + m.Id = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) + if m.Addrs == nil { + m.Addrs = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &GetAllReplicatorReply_Replicators_Info{} + } + if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) + if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) + if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/net/api/service.go b/net/api/service.go deleted file mode 100644 index 2873eab65b..0000000000 --- a/net/api/service.go +++ /dev/null @@ -1,158 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "context" - - libpeer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/net" - pb "github.com/sourcenetwork/defradb/net/api/pb" -) - -var ( - log = logging.MustNewLogger("netapi") -) - -type Service struct { - peer *net.Peer -} - -func NewService(peer *net.Peer) *Service { - return &Service{peer: peer} -} - -func (s *Service) SetReplicator( - ctx context.Context, - req *pb.SetReplicatorRequest, -) (*pb.SetReplicatorReply, error) { - log.Debug(ctx, "Received SetReplicator request") - - addr, err := ma.NewMultiaddrBytes(req.Addr) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - pid, err := s.peer.SetReplicator(ctx, addr, req.Collections...) 
- if err != nil { - return nil, err - } - return &pb.SetReplicatorReply{ - PeerID: marshalPeerID(pid), - }, nil -} - -func (s *Service) DeleteReplicator( - ctx context.Context, - req *pb.DeleteReplicatorRequest, -) (*pb.DeleteReplicatorReply, error) { - log.Debug(ctx, "Received DeleteReplicator request") - err := s.peer.DeleteReplicator(ctx, libpeer.ID(req.PeerID)) - if err != nil { - return nil, err - } - return &pb.DeleteReplicatorReply{ - PeerID: req.PeerID, - }, nil -} - -func (s *Service) GetAllReplicators( - ctx context.Context, - req *pb.GetAllReplicatorRequest, -) (*pb.GetAllReplicatorReply, error) { - log.Debug(ctx, "Received GetAllReplicators request") - - reps, err := s.peer.GetAllReplicators(ctx) - if err != nil { - return nil, err - } - - pbReps := []*pb.GetAllReplicatorReply_Replicators{} - for _, rep := range reps { - pbReps = append(pbReps, &pb.GetAllReplicatorReply_Replicators{ - Info: &pb.GetAllReplicatorReply_Replicators_Info{ - Id: []byte(rep.Info.ID), - Addrs: rep.Info.Addrs[0].Bytes(), - }, - Schemas: rep.Schemas, - }) - } - - return &pb.GetAllReplicatorReply{ - Replicators: pbReps, - }, nil -} - -func marshalPeerID(id libpeer.ID) []byte { - b, _ := id.Marshal() // This will never return an error - return b -} - -// RemoveP2PCollections handles the request to add P2P collecctions to the stored list. -func (s *Service) AddP2PCollections( - ctx context.Context, - req *pb.AddP2PCollectionsRequest, -) (*pb.AddP2PCollectionsReply, error) { - log.Debug(ctx, "Received AddP2PCollections request") - - err := s.peer.AddP2PCollections(req.Collections) - if err != nil { - return nil, err - } - - return &pb.AddP2PCollectionsReply{}, nil -} - -// RemoveP2PCollections handles the request to remove P2P collecctions from the stored list. -func (s *Service) RemoveP2PCollections( - ctx context.Context, - req *pb.RemoveP2PCollectionsRequest, -) (*pb.RemoveP2PCollectionsReply, error) { - log.Debug(ctx, "Received RemoveP2PCollections request") - - err := s.peer.RemoveP2PCollections(req.Collections) - if err != nil { - return nil, err - } - - return &pb.RemoveP2PCollectionsReply{}, nil -} - -// GetAllP2PCollections handles the request to get all P2P collecctions from the stored list. 
-func (s *Service) GetAllP2PCollections( - ctx context.Context, - req *pb.GetAllP2PCollectionsRequest, -) (*pb.GetAllP2PCollectionsReply, error) { - log.Debug(ctx, "Received GetAllP2PCollections request") - collections, err := s.peer.GetAllP2PCollections() - if err != nil { - return nil, err - } - - var pbCols []*pb.GetAllP2PCollectionsReply_Collection - for _, col := range collections { - pbCols = append(pbCols, &pb.GetAllP2PCollectionsReply_Collection{ - Id: col.ID, - Name: col.Name, - }) - } - - return &pb.GetAllP2PCollectionsReply{ - Collections: pbCols, - }, nil -} diff --git a/net/client.go b/net/client.go index 2cbf246441..e38df0ed54 100644 --- a/net/client.go +++ b/net/client.go @@ -14,12 +14,10 @@ package net import ( "context" - "fmt" "time" "github.com/libp2p/go-libp2p/core/peer" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/logging" @@ -35,20 +33,16 @@ var ( // pushLog creates a pushLog request and sends it to another node // over libp2p grpc connection func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) error { - dockey, err := client.NewDocKeyFromString(evt.DocKey) - if err != nil { - return errors.Wrap("failed to get DocKey from broadcast message", err) - } log.Debug( ctx, "Preparing pushLog request", - logging.NewKV("DocKey", dockey), + logging.NewKV("DocKey", evt.DocKey), logging.NewKV("CID", evt.Cid), logging.NewKV("SchemaId", evt.SchemaID)) body := &pb.PushLogRequest_Body{ - DocKey: &pb.ProtoDocKey{DocKey: dockey}, - Cid: &pb.ProtoCid{Cid: evt.Cid}, + DocKey: []byte(evt.DocKey), + Cid: evt.Cid.Bytes(), SchemaID: []byte(evt.SchemaID), Creator: s.peer.host.ID().String(), Log: &pb.Document_Log{ @@ -61,20 +55,26 @@ func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) er log.Debug( ctx, "Pushing log", - logging.NewKV("DocKey", dockey), + logging.NewKV("DocKey", evt.DocKey), logging.NewKV("CID", evt.Cid), - logging.NewKV("PeerID", pid)) + logging.NewKV("PeerID", pid), + ) - client, err := s.dial(pid) // grpc dial over p2p stream + client, err := s.dial(pid) // grpc dial over P2P stream if err != nil { - return errors.Wrap("failed to push log", err) + return NewErrPushLog(err) } cctx, cancel := context.WithTimeout(ctx, PushTimeout) defer cancel() if _, err := client.PushLog(cctx, req); err != nil { - return errors.Wrap(fmt.Sprintf("Failed PushLog RPC request %s for %s to %s", evt.Cid, dockey, pid), err) + return NewErrPushLog( + err, + errors.NewKV("CID", evt.Cid), + errors.NewKV("DocKey", evt.DocKey), + errors.NewKV("PeerID", pid), + ) } return nil } diff --git a/net/client_test.go b/net/client_test.go new file mode 100644 index 0000000000..e28c543175 --- /dev/null +++ b/net/client_test.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
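The `pushLog` change above drops the custom proto wrapper types and sends `DocKey` and `Cid` as raw bytes, which the generated `MarshalToSizedBufferVT` helpers earlier in this diff frame as ordinary length-delimited protobuf fields: the buffer is filled back to front, `encodeVarint` writes the length, and the tag byte is `fieldNumber<<3 | wireType` (so `0xa` is field 1, wire type 2). Below is a minimal, self-contained sketch of that framing; it mirrors the generated helpers but is an illustration, not part of the generated file.

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov is the varint-encoded size of x, exactly as in the generated code.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x immediately before offset, matching the
// back-to-front style of the generated marshalers.
func encodeVarint(dAtA []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return base
}

func main() {
	peerID := []byte("peer-id-bytes") // stand-in for a PeerID field value
	// Frame it as field 1, wire type 2 (length-delimited): tag byte 0xa.
	size := 1 + sov(uint64(len(peerID))) + len(peerID)
	buf := make([]byte, size)
	i := size
	i -= len(peerID)
	copy(buf[i:], peerID)
	i = encodeVarint(buf, i, uint64(len(peerID)))
	i--
	buf[i] = 0xa
	fmt.Printf("% x\n", buf) // 0a 0d 70 65 65 72 2d 69 64 2d 62 79 74 65 73
}
```

One reason for the reverse order: a nested message's body is written first, so its length prefix can be emitted immediately in front of it from the write's return value, which is how `m.Info` is handled in `GetAllReplicatorReply_Replicators.MarshalToSizedBufferVT` above.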
+ +package net + +import ( + "context" + "testing" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/events" +) + +func TestPushlogWithDialFailure(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + doc, err := client.NewDocFromJSON([]byte(`{"test": "test"}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + n.server.opts = append( + n.server.opts, + grpc.WithTransportCredentials(nil), + grpc.WithCredentialsBundle(nil), + ) + + err = n.server.pushLog(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: cid, + SchemaID: "test", + Block: &EmptyNode{}, + Priority: 1, + }, peer.ID("some-peer-id")) + require.Contains(t, err.Error(), "no transport security set") +} + +func TestPushlogWithInvalidPeerID(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + doc, err := client.NewDocFromJSON([]byte(`{"test": "test"}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n.server.pushLog(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: cid, + SchemaID: "test", + Block: &EmptyNode{}, + Priority: 1, + }, peer.ID("some-peer-id")) + require.Contains(t, err.Error(), "failed to parse peer ID") +} + +func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { + ctx := context.Background() + _, n1 := newTestNode(ctx, t) + n1.Start() + _, n2 := newTestNode(ctx, t) + n2.Start() + + err := n1.host.Connect(ctx, peer.AddrInfo{ + ID: n2.PeerID(), + Addrs: []ma.Multiaddr{ + n2.host.Addrs()[0], + }, + }) + require.NoError(t, err) + + _, err = n1.db.AddSchema(ctx, `type User { + name: String + }`) + require.NoError(t, err) + + _, err = n2.db.AddSchema(ctx, `type User { + name: String + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "test"}`)) + require.NoError(t, err) + + col, err := n1.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + err = col.Save(ctx, doc) + require.NoError(t, err) + + col, err = n2.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + err = col.Save(ctx, doc) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n1.server.pushLog(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: cid, + SchemaID: col.SchemaID(), + Block: &EmptyNode{}, + Priority: 1, + }, n2.PeerID()) + require.NoError(t, err) +} diff --git a/node/config.go b/net/config.go similarity index 75% rename from node/config.go rename to net/config.go index f660a4a8b5..28fd73f25e 100644 --- a/node/config.go +++ b/net/config.go @@ -10,7 +10,7 @@ /* Node configuration, in which NodeOpt functions are applied on Options. */ -package node +package net import ( "time" @@ -19,6 +19,8 @@ import ( "github.com/libp2p/go-libp2p/p2p/net/connmgr" ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" + + "github.com/sourcenetwork/defradb/config" ) // Options is the node options. @@ -58,8 +60,31 @@ func NewConnManager(low int, high int, grace time.Duration) (cconnmgr.ConnManage return c, nil } +// WithConfig provides the Node-specific configuration, from the top-level Net config. 
+func WithConfig(cfg *config.Config) NodeOpt { + return func(opt *Options) error { + var err error + err = WithListenP2PAddrStrings(cfg.Net.P2PAddress)(opt) + if err != nil { + return err + } + err = WithListenTCPAddrString(cfg.Net.TCPAddress)(opt) + if err != nil { + return err + } + opt.EnableRelay = cfg.Net.RelayEnabled + opt.EnablePubSub = cfg.Net.PubSubEnabled + opt.DataPath = cfg.Datastore.Badger.Path + opt.ConnManager, err = NewConnManager(100, 400, time.Second*20) + if err != nil { + return err + } + return nil + } +} + // DataPath sets the data path. -func DataPath(path string) NodeOpt { +func WithDataPath(path string) NodeOpt { return func(opt *Options) error { opt.DataPath = path return nil @@ -83,7 +108,7 @@ func WithEnableRelay(enable bool) NodeOpt { } // ListenP2PAddrStrings sets the address to listen on given as strings. -func ListenP2PAddrStrings(addrs ...string) NodeOpt { +func WithListenP2PAddrStrings(addrs ...string) NodeOpt { return func(opt *Options) error { for _, addrstr := range addrs { a, err := ma.NewMultiaddr(addrstr) @@ -97,7 +122,7 @@ func ListenP2PAddrStrings(addrs ...string) NodeOpt { } // ListenTCPAddrString sets the TCP address to listen on, as Multiaddr. -func ListenTCPAddrString(addr string) NodeOpt { +func WithListenTCPAddrString(addr string) NodeOpt { return func(opt *Options) error { a, err := ma.NewMultiaddr(addr) if err != nil { @@ -109,7 +134,7 @@ func ListenTCPAddrString(addr string) NodeOpt { } // ListenAddrs sets the address to listen on given as MultiAddr(s). -func ListenAddrs(addrs ...ma.Multiaddr) NodeOpt { +func WithListenAddrs(addrs ...ma.Multiaddr) NodeOpt { return func(opt *Options) error { opt.ListenAddrs = addrs return nil diff --git a/net/config_test.go b/net/config_test.go new file mode 100644 index 0000000000..bffc19aead --- /dev/null +++ b/net/config_test.go @@ -0,0 +1,127 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
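The rename from `node/config.go` to `net/config.go` also standardizes the option constructors on a `With` prefix and introduces `WithConfig`, which projects the top-level `config.Config` onto the node options. Options compose through `NewMergedOptions`, which the tests that follow exercise one by one. A hypothetical composition, assuming the `net` package from this diff, might look like this (the data path and listen address are placeholder values, not project defaults):

```go
// exampleNodeOptions builds an Options value from individual NodeOpt
// constructors; each option is applied in order and the first error
// aborts the merge, mirroring how WithConfig threads errors above.
func exampleNodeOptions() (*Options, error) {
	return NewMergedOptions(
		WithDataPath("/tmp/defradb"),
		WithPubSub(true),
		WithEnableRelay(false),
		WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/9999"),
	)
}
```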
+ +package net + +import ( + "testing" + "time" + + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/config" +) + +func TestNewMergedOptionsSimple(t *testing.T) { + opt, err := NewMergedOptions() + require.NoError(t, err) + require.NotNil(t, opt) +} + +func TestNewMergedOptionsWithNilOption(t *testing.T) { + opt, err := NewMergedOptions(nil) + require.NoError(t, err) + require.NotNil(t, opt) +} + +func TestNewConnManagerSimple(t *testing.T) { + conMngr, err := NewConnManager(1, 10, time.Second) + require.NoError(t, err) + err = conMngr.Close() + require.NoError(t, err) +} + +func TestNewConnManagerWithError(t *testing.T) { + _, err := NewConnManager(1, 10, -time.Second) + require.Contains(t, err.Error(), "grace period must be non-negative") +} + +func TestWithConfigWithP2PAddressError(t *testing.T) { + cfg := config.Config{ + Net: &config.NetConfig{ + P2PAddress: "/willerror/0.0.0.0/tcp/9999", + }, + } + err := WithConfig(&cfg)(&Options{}) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithConfigWithTCPAddressError(t *testing.T) { + cfg := config.Config{ + Net: &config.NetConfig{ + P2PAddress: "/ip4/0.0.0.0/tcp/9999", + TCPAddress: "/willerror/0.0.0.0/tcp/9999", + }, + } + err := WithConfig(&cfg)(&Options{}) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithDataPath(t *testing.T) { + path := "test/path" + opt, err := NewMergedOptions(WithDataPath(path)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, path, opt.DataPath) +} + +func TestWithPubSub(t *testing.T) { + opt, err := NewMergedOptions(WithPubSub(true)) + require.NoError(t, err) + require.NotNil(t, opt) + require.True(t, opt.EnablePubSub) +} + +func TestWithEnableRelay(t *testing.T) { + opt, err := NewMergedOptions(WithEnableRelay(true)) + require.NoError(t, err) + require.NotNil(t, opt) + require.True(t, opt.EnableRelay) +} + +func TestWithListenP2PAddrStringsWithError(t *testing.T) { + addr := "/willerror/0.0.0.0/tcp/9999" + _, err := NewMergedOptions(WithListenP2PAddrStrings(addr)) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithListenP2PAddrStrings(t *testing.T) { + addr := "/ip4/0.0.0.0/tcp/9999" + opt, err := NewMergedOptions(WithListenP2PAddrStrings(addr)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, addr, opt.ListenAddrs[0].String()) +} + +func TestWithListenTCPAddrStringWithError(t *testing.T) { + addr := "/willerror/0.0.0.0/tcp/9999" + _, err := NewMergedOptions(WithListenTCPAddrString(addr)) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithListenTCPAddrString(t *testing.T) { + addr := "/ip4/0.0.0.0/tcp/9999" + opt, err := NewMergedOptions(WithListenTCPAddrString(addr)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, addr, opt.TCPAddr.String()) +} + +func TestWithListenAddrs(t *testing.T) { + addr := "/ip4/0.0.0.0/tcp/9999" + a, err := ma.NewMultiaddr(addr) + require.NoError(t, err) + + opt, err := NewMergedOptions(WithListenAddrs(a)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, addr, opt.ListenAddrs[0].String()) +} diff --git a/net/dag.go b/net/dag.go index 1fedd9301e..d814630f6a 100644 --- a/net/dag.go +++ b/net/dag.go @@ -30,7 +30,7 @@ var ( DAGSyncTimeout = time.Second * 60 ) -// A DAGSyncer is an abstraction to an IPLD-based p2p storage layer. A +// A DAGSyncer is an abstraction to an IPLD-based P2P storage layer.
A // DAGSyncer is a DAGService with the ability to publish new ipld nodes to the // network, and retrieving others from it. type DAGSyncer interface { @@ -55,7 +55,7 @@ type dagJob struct { node ipld.Node // the current ipld Node collection client.Collection // collection our document belongs to - dockey core.DataStoreKey // dockey of our document + dsKey core.DataStoreKey // datastore key of our document fieldName string // field of the subgraph our node belongs to // Transaction common to a pushlog event. It is used to pass it along to processLog @@ -87,12 +87,13 @@ func (p *Peer) sendJobWorker() { return case newJob := <-p.sendJobs: - jobs, ok := docWorkerQueue[newJob.dockey.DocKey] + jobs, ok := docWorkerQueue[newJob.dsKey.DocKey] if !ok { jobs = make(chan *dagJob, numWorkers) for i := 0; i < numWorkers; i++ { go p.dagWorker(jobs) } + docWorkerQueue[newJob.dsKey.DocKey] = jobs } jobs <- newJob @@ -112,7 +113,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { log.Debug( p.ctx, "Starting new job from DAG queue", - logging.NewKV("DocKey", job.dockey), + logging.NewKV("Datastore Key", job.dsKey), logging.NewKV("CID", job.node.Cid()), ) @@ -128,7 +129,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { p.ctx, job.txn, job.collection, - job.dockey, + job.dsKey, job.node.Cid(), job.fieldName, job.node, @@ -140,7 +141,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { p.ctx, "Error processing log", err, - logging.NewKV("DocKey", job.dockey), + logging.NewKV("Datastore key", job.dsKey), logging.NewKV("CID", job.node.Cid()), ) job.session.Done() @@ -157,7 +158,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { j.session, j.txn, j.collection, - j.dockey, + j.dsKey, j.fieldName, j.node, children, diff --git a/net/dag_test.go b/net/dag_test.go new file mode 100644 index 0000000000..d0e9a18ce7 --- /dev/null +++ b/net/dag_test.go @@ -0,0 +1,398 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
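Besides renaming `dockey` to `dsKey` in `dagJob`, the `dag.go` hunk above adds the `docWorkerQueue[newJob.dsKey.DocKey] = jobs` assignment, so the lazily created worker pool for a document is now retained instead of being rebuilt for every job. A simplified, generic sketch of that per-document worker-queue pattern follows (teardown via `closeJob` is omitted, and the `job` type here is a stand-in for `dagJob`):

```go
package main

import (
	"fmt"
	"sync"
)

// job is a stand-in for dagJob: a document key plus a session WaitGroup.
type job struct {
	docKey  string
	payload string
	session *sync.WaitGroup
}

const numWorkers = 5

// dispatch lazily creates one bounded queue and a fixed worker pool per
// document key, then hands the job to that pool.
func dispatch(queues map[string]chan job, j job) {
	jobs, ok := queues[j.docKey]
	if !ok {
		jobs = make(chan job, numWorkers)
		for i := 0; i < numWorkers; i++ {
			go func() {
				for j := range jobs {
					// The real dagWorker calls processLog and then
					// handleChildBlocks here.
					fmt.Println("processed", j.docKey, j.payload)
					j.session.Done()
				}
			}()
		}
		// Without this store every job would spawn a fresh pool; this
		// is the assignment the diff adds.
		queues[j.docKey] = jobs
	}
	jobs <- j
}

func main() {
	queues := make(map[string]chan job)
	wg := &sync.WaitGroup{}
	wg.Add(2)
	dispatch(queues, job{"bae-123", "block-1", wg})
	dispatch(queues, job{"bae-123", "block-2", wg})
	wg.Wait()
}
```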
+ +package net + +import ( + "context" + "sync" + "testing" + "time" + + dag "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + ipld "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/core/crdt" + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +const timeout = 5 * time.Second + +func TestSendJobWorker_ExitOnContextClose_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + err := n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithNewJobWithClosePriorToProcessing_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + n.sendJobs <- &dagJob{ + session: &wg, + node: &EmptyNode{}, + collection: col, + dsKey: dsKey, + txn: txn, + } + + err = n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithNewJob_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + n.sendJobs <- &dagJob{ + session: &wg, + node: &EmptyNode{}, + collection: col, + dsKey: dsKey, + txn: txn, + } + // Give the jobworker time to process the job. 
+ time.Sleep(100 * time.Microsecond) + err = n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + n.sendJobs <- &dagJob{ + session: &wg, + node: &EmptyNode{}, + collection: col, + dsKey: dsKey, + txn: txn, + } + + n.closeJob <- dsKey.DocKey + + err = n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) { + ctx := context.Background() + db1, n1 := newTestNode(ctx, t) + db2, n2 := newTestNode(ctx, t) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + require.NoError(t, err) + n2.Boostrap(addrs) + + done := make(chan struct{}) + go func() { + n2.sendJobWorker() + close(done) + }() + + _, err = db1.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + _, err = db2.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db1.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + txn, err := db2.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{}) + require.NoError(t, err) + + var getter format.NodeGetter = n2.Peer.newDAGSyncerTxn(txn) + if sessionMaker, ok := getter.(SessionDAGSyncer); ok { + log.Debug(ctx, "Upgrading DAGSyncer with a session") + getter = sessionMaker.Session(ctx) + } + + n2.sendJobs <- &dagJob{ + session: &wg, + nodeGetter: getter, + node: node, + collection: col, + dsKey: dsKey, + txn: txn, + } + // Give the jobworker time to process the job. 
+ time.Sleep(100 * time.Microsecond) + err = n1.Close() + require.NoError(t, err) + err = n2.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithPeerAndChildren_NoError(t *testing.T) { + ctx := context.Background() + db1, n1 := newTestNode(ctx, t) + db2, n2 := newTestNode(ctx, t) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + require.NoError(t, err) + n2.Boostrap(addrs) + + done := make(chan struct{}) + go func() { + n2.sendJobWorker() + close(done) + }() + + _, err = db1.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + _, err = db2.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db1.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + txn, err := db2.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + links := []core.DAGLink{} + for k := range doc.Fields() { + delta := &crdt.LWWRegDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + FieldName: k, + } + + node, err := makeNode(delta, []cid.Cid{}) + require.NoError(t, err) + + links = append(links, core.DAGLink{ + Name: k, + Cid: node.Cid(), + }) + } + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + SubDAGs: links, + } + + node, err := makeNode(delta, []cid.Cid{}) + require.NoError(t, err) + + var getter format.NodeGetter = n2.Peer.newDAGSyncerTxn(txn) + if sessionMaker, ok := getter.(SessionDAGSyncer); ok { + log.Debug(ctx, "Upgrading DAGSyncer with a session") + getter = sessionMaker.Session(ctx) + } + + n2.sendJobs <- &dagJob{ + session: &wg, + nodeGetter: getter, + node: node, + collection: col, + dsKey: dsKey, + txn: txn, + } + // Give the jobworker time to process the job. 
+ time.Sleep(100 * time.Microsecond) + err = n1.Close() + require.NoError(t, err) + err = n2.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func makeNode(delta core.Delta, heads []cid.Cid) (ipld.Node, error) { + var data []byte + var err error + if delta != nil { + data, err = delta.Marshal() + if err != nil { + return nil, err + } + } + + nd := dag.NodeWithData(data) + // The cid builder defaults to v0, we want to be using v1 Cids + err = nd.SetCidBuilder(cid.V1Builder{ + Codec: cid.DagProtobuf, + MhType: mh.SHA2_256, + MhLength: -1, + }) + if err != nil { + return nil, err + } + + // add heads + for _, h := range heads { + if err = nd.AddRawLink("_head", &ipld.Link{Cid: h}); err != nil { + return nil, err + } + } + + // add delta specific links + if comp, ok := delta.(core.CompositeDelta); ok { + for _, dagLink := range comp.Links() { + if err = nd.AddRawLink(dagLink.Name, &ipld.Link{Cid: dagLink.Cid}); err != nil { + return nil, err + } + } + } + return nd, nil +} diff --git a/net/dialer.go b/net/dialer.go index be1140a2b5..16ac03d03f 100644 --- a/net/dialer.go +++ b/net/dialer.go @@ -20,9 +20,7 @@ import ( gostream "github.com/libp2p/go-libp2p-gostream" libpeer "github.com/libp2p/go-libp2p/core/peer" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/status" corenet "github.com/sourcenetwork/defradb/core/net" "github.com/sourcenetwork/defradb/errors" @@ -41,8 +39,7 @@ func (s *server) dial(peerID libpeer.ID) (pb.ServiceClient, error) { conn, ok := s.conns[peerID] if ok { if conn.GetState() == connectivity.Shutdown { - if err := conn.Close(); err != nil && status.Code(err) != codes.Canceled { - // log.Errorf("error closing connection: %v", err) + if err := conn.Close(); err != nil { return nil, err } } else { diff --git a/net/dialer_test.go b/net/dialer_test.go new file mode 100644 index 0000000000..5e01b2384f --- /dev/null +++ b/net/dialer_test.go @@ -0,0 +1,114 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package net + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +func TestDial_WithConnectedPeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + ctx := context.Background() + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) +} + +func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + ctx := context.Background() + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) + + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) +} + +func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_ClosingConnectionError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + ctx := context.Background() + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) + + err = n1.server.conns[n2.PeerID()].Close() + require.NoError(t, err) + + _, err = n1.server.dial(n2.PeerID()) + require.Contains(t, err.Error(), "grpc: the client connection is closing") +} diff --git a/net/doc.go b/net/doc.go index ac33d9a2d1..dd80ee53a4 100644 --- a/net/doc.go +++ b/net/doc.go @@ -11,7 +11,7 @@ // limitations under the License. /* -Package net provides p2p network functions for the core DefraDB instance. +Package net provides P2P network functions for the core DefraDB instance. Notable design descision: all DocKeys (Documents) have their own respective PubSub topics. 
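The package documentation above records the design decision that every DocKey (Document) gets its own PubSub topic. As a rough illustration of what that means for a subscriber, the sketch below joins a per-document topic using the plain go-libp2p-pubsub Join/Subscribe API; the function name and wiring are hypothetical and simplified relative to what the net package actually does:

package main

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// subscribeToDoc joins the topic dedicated to a single document and consumes
// broadcast updates for it until the context is cancelled. Under the
// one-topic-per-DocKey design, a peer only receives traffic for the
// documents whose topics it has joined.
func subscribeToDoc(ctx context.Context, ps *pubsub.PubSub, docKey string) error {
	// The topic name is simply the document's key.
	topic, err := ps.Join(docKey)
	if err != nil {
		return err
	}
	sub, err := topic.Subscribe()
	if err != nil {
		return err
	}
	go func() {
		for {
			msg, err := sub.Next(ctx) // blocks until the next update arrives
			if err != nil {
				return // context cancelled or subscription closed
			}
			_ = msg.Data // apply the replicated update for this document
		}
	}()
	return nil
}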
diff --git a/net/errors.go b/net/errors.go new file mode 100644 index 0000000000..3f8d4926c5 --- /dev/null +++ b/net/errors.go @@ -0,0 +1,49 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package net + +import ( + "fmt" + + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errPushLog = "failed to push log" + errFailedToGetDockey = "failed to get DocKey from broadcast message" + errPublishingToDockeyTopic = "can't publish log %s for dockey %s" + errPublishingToSchemaTopic = "can't publish log %s for schema %s" +) + +var ( + ErrPeerConnectionWaitTimout = errors.New("waiting for peer connection timed out") + ErrPubSubWaitTimeout = errors.New("waiting for pubsub timed out") + ErrPushLogWaitTimeout = errors.New("waiting for pushlog timed out") + ErrNilDB = errors.New("database object can't be nil") + ErrNilUpdateChannel = errors.New("tried to subscribe to update channel, but update channel is nil") + ErrSelfTargetForReplicator = errors.New("can't target ourselves as a replicator") +) + +func NewErrPushLog(inner error, kv ...errors.KV) error { + return errors.Wrap(errPushLog, inner, kv...) +} + +func NewErrFailedToGetDockey(inner error, kv ...errors.KV) error { + return errors.Wrap(errFailedToGetDockey, inner, kv...) +} + +func NewErrPublishingToDockeyTopic(inner error, cid, key string, kv ...errors.KV) error { + return errors.Wrap(fmt.Sprintf(errPublishingToDockeyTopic, cid, key), inner, kv...) +} + +func NewErrPublishingToSchemaTopic(inner error, cid, key string, kv ...errors.KV) error { + return errors.Wrap(fmt.Sprintf(errPublishingToSchemaTopic, cid, key), inner, kv...) +} diff --git a/node/node.go b/net/node.go similarity index 90% rename from node/node.go rename to net/node.go index 1df5da70e4..8f916cda16 100644 --- a/node/node.go +++ b/net/node.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2023 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -14,7 +14,7 @@ and GRPC server. Basically it combines db/DB, net/Peer, and net/Server into a single Node object. */ -package node +package net import ( "context" @@ -44,36 +44,26 @@ import ( "github.com/textileio/go-libp2p-pubsub-rpc/finalizer" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/net" ) -var ( - log = logging.MustNewLogger("node") -) - -const evtWaitTimeout = 10 * time.Second +var evtWaitTimeout = 10 * time.Second // Node is a networked peer instance of DefraDB. type Node struct { // embed the DB interface into the node client.DB - *net.Peer - - host host.Host - dht routing.Routing - pubsub *pubsub.PubSub + *Peer // receives an event when the status of a peer connection changes. peerEvent chan event.EvtPeerConnectednessChanged // receives an event when a pubsub topic is added. - pubSubEvent chan net.EvtPubSub + pubSubEvent chan EvtPubSub // receives an event when a pushLog request has been processed. 
- pushLogEvent chan net.EvtReceivedPushLog + pushLogEvent chan EvtReceivedPushLog ctx context.Context cancel context.CancelFunc @@ -95,7 +85,7 @@ func NewNode( // create our peerstore from the underlying defra rootstore // prefixed with "p2p" rootstore := db.Root() - pstore := namespace.Wrap(rootstore, ds.NewKey("peers")) + pstore := namespace.Wrap(rootstore, ds.NewKey("/db")) peerstore, err := pstoreds.NewPeerstore(ctx, pstore, pstoreds.DefaultOpts()) if err != nil { return nil, fin.Cleanup(err) @@ -158,7 +148,7 @@ func NewNode( ctx, cancel := context.WithCancel(ctx) - peer, err := net.NewPeer( + peer, err := NewPeer( ctx, db, h, @@ -180,13 +170,10 @@ func NewNode( // test, but we should resolve this when we can (e.g. via using subscribe-like // mechanics, potentially via use of a ring-buffer based [events.Channel] // implementation): https://github.com/sourcenetwork/defradb/issues/1358. - pubSubEvent: make(chan net.EvtPubSub, 20), - pushLogEvent: make(chan net.EvtReceivedPushLog, 20), + pubSubEvent: make(chan EvtPubSub, 20), + pushLogEvent: make(chan EvtReceivedPushLog, 20), peerEvent: make(chan event.EvtPeerConnectednessChanged, 20), Peer: peer, - host: h, - dht: ddht, - pubsub: ps, DB: db, ctx: ctx, cancel: cancel, @@ -249,6 +236,7 @@ func (n *Node) subscribeToPeerConnectionEvents() { n.ctx, fmt.Sprintf("failed to subscribe to peer connectedness changed event: %v", err), ) + return } go func() { for e := range sub.Out() { @@ -264,20 +252,21 @@ func (n *Node) subscribeToPeerConnectionEvents() { // subscribeToPubSubEvents subscribes the node to the event bus for a pubsub. func (n *Node) subscribeToPubSubEvents() { - sub, err := n.host.EventBus().Subscribe(new(net.EvtPubSub)) + sub, err := n.host.EventBus().Subscribe(new(EvtPubSub)) if err != nil { log.Info( n.ctx, fmt.Sprintf("failed to subscribe to pubsub event: %v", err), ) + return } go func() { for e := range sub.Out() { select { - case n.pubSubEvent <- e.(net.EvtPubSub): + case n.pubSubEvent <- e.(EvtPubSub): default: <-n.pubSubEvent - n.pubSubEvent <- e.(net.EvtPubSub) + n.pubSubEvent <- e.(EvtPubSub) } } }() @@ -285,20 +274,21 @@ func (n *Node) subscribeToPubSubEvents() { // subscribeToPushLogEvents subscribes the node to the event bus for a push log request completion. 
func (n *Node) subscribeToPushLogEvents() {
- sub, err := n.host.EventBus().Subscribe(new(net.EvtReceivedPushLog))
+ sub, err := n.host.EventBus().Subscribe(new(EvtReceivedPushLog))
 if err != nil {
 log.Info(
 n.ctx,
 fmt.Sprintf("failed to subscribe to push log event: %v", err),
 )
+ return
 }
 go func() {
 for e := range sub.Out() {
 select {
- case n.pushLogEvent <- e.(net.EvtReceivedPushLog):
+ case n.pushLogEvent <- e.(EvtReceivedPushLog):
 default:
 <-n.pushLogEvent
- n.pushLogEvent <- e.(net.EvtReceivedPushLog)
+ n.pushLogEvent <- e.(EvtReceivedPushLog)
 }
 }
 }()
@@ -317,7 +307,7 @@ func (n *Node) WaitForPeerConnectionEvent(id peer.ID) error {
 }
 return nil
 case <-time.After(evtWaitTimeout):
- return errors.New("waiting for peer connection timed out")
+ return ErrPeerConnectionWaitTimout
 case <-n.ctx.Done():
 return nil
 }
@@ -334,7 +324,7 @@ func (n *Node) WaitForPubSubEvent(id peer.ID) error {
 }
 return nil
 case <-time.After(evtWaitTimeout):
- return errors.New("waiting for pubsub timed out")
+ return ErrPubSubWaitTimeout
 case <-n.ctx.Done():
 return nil
 }
@@ -357,7 +347,7 @@ func (n *Node) WaitForPushLogByPeerEvent(id peer.ID) error {
 }
 return nil
 case <-time.After(evtWaitTimeout):
- return errors.New("waiting for pushlog timed out")
+ return ErrPushLogWaitTimeout
 case <-n.ctx.Done():
 return nil
 }
@@ -380,7 +370,7 @@ func (n *Node) WaitForPushLogFromPeerEvent(id peer.ID) error {
 }
 return nil
 case <-time.After(evtWaitTimeout):
- return errors.New("waiting for pushlog timed out")
+ return ErrPushLogWaitTimeout
 case <-n.ctx.Done():
 return nil
 }
diff --git a/net/node_test.go b/net/node_test.go
new file mode 100644
index 0000000000..b2ab255b20
--- /dev/null
+++ b/net/node_test.go
@@ -0,0 +1,799 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package net
+
+import (
+ "bytes"
+ "context"
+ "testing"
+ "time"
+
+ badger "github.com/dgraph-io/badger/v3"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+
+ "github.com/sourcenetwork/defradb/client"
+ "github.com/sourcenetwork/defradb/config"
+ badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3"
+ "github.com/sourcenetwork/defradb/datastore/memory"
+ "github.com/sourcenetwork/defradb/db"
+ "github.com/sourcenetwork/defradb/logging"
+ netutils "github.com/sourcenetwork/defradb/net/utils"
+)
+
+// Node.Boostrap is not tested because the underlying *ipfslite.Peer.Bootstrap is a best-effort function.
+
+func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB {
+ var database client.DB
+ var options []db.Option
+ ctx := context.Background()
+ options = append(options, db.WithUpdateEvents())
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+ database, err = db.NewDB(ctx, rootstore, options...)
+ require.NoError(t, err) + return database +} + +func TestNewNode_WithEnableRelay_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + _, err = NewNode( + context.Background(), + db, + WithEnableRelay(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) +} + +func TestNewNode_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + _, err = NewNode( + context.Background(), + db, + WithListenTCPAddrString("/ip4/碎片整理"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") +} + +func TestNewNode_WithDBClosed_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + db.Close(ctx) + _, err = NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestNewNode_NoPubSub_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + n, err := NewNode( + context.Background(), + db, + WithPubSub(false), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + require.Nil(t, n.ps) +} + +func TestNewNode_WithPubSub_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n, err := NewNode( + ctx, + db, + WithPubSub(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + + require.NoError(t, err) + // overly simple check of validity of pubsub, avoiding the process of creating a PubSub + require.NotNil(t, n.ps) +} + +func TestNewNode_WithPubSub_FailsWithoutWithDataPath(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + _, err = NewNode( + ctx, + db, + WithPubSub(true), + ) + require.EqualError(t, err, "1 error occurred:\n\t* mkdir : no such file or directory\n\n") +} + +func TestNodeClose_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + err = n.Close() + require.NoError(t, err) +} + +func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := 
NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n1.Boostrap([]peer.AddrInfo{}) +} + +func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) +} + +func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{ + n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String(), + "/ip4/0.0.0.0/tcp/1234/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci6", + "/ip4/0.0.0.0/tcp/1235/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci5", + "/ip4/0.0.0.0/tcp/1236/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4", + }) + require.NoError(t, err) + n2.Boostrap(addrs) +} + +func mergeOptions(nodeOpts ...NodeOpt) (Options, error) { + var options Options + var nodeOpt NodeOpt + for _, opt := range append(nodeOpts, nodeOpt) { + if opt == nil { + continue + } + if err := opt(&options); err != nil { + return options, err + } + } + return options, nil +} + +func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + n, err := NewNode( + context.Background(), + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") +} + +func TestWithListenTCPAddrString_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { + opt := WithListenTCPAddrString("/ip4/碎片整理") + options, err := mergeOptions(opt) + require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") + require.Equal(t, Options{}, options) +} + +func TestNodeConfig_NoError(t *testing.T) { + tempDir := t.TempDir() + + cfg := config.DefaultConfig() + 
cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179"
+ cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169"
+ cfg.Net.RPCTimeout = "100s"
+ cfg.Net.RPCMaxConnectionIdle = "111s"
+ cfg.Net.RelayEnabled = true
+ cfg.Net.PubSubEnabled = true
+ cfg.Datastore.Badger.Path = tempDir
+
+ configOpt := WithConfig(cfg)
+ options, err := NewMergedOptions(configOpt)
+ require.NoError(t, err)
+
+ // confirming it provides the same config as a manually constructed node.Options
+ p2pAddr, err := ma.NewMultiaddr(cfg.Net.P2PAddress)
+ require.NoError(t, err)
+ tcpAddr, err := ma.NewMultiaddr(cfg.Net.TCPAddress)
+ require.NoError(t, err)
+ connManager, err := NewConnManager(100, 400, time.Second*20)
+ require.NoError(t, err)
+ expectedOptions := Options{
+ ListenAddrs: []ma.Multiaddr{p2pAddr},
+ TCPAddr: tcpAddr,
+ DataPath: tempDir,
+ EnablePubSub: true,
+ EnableRelay: true,
+ ConnManager: connManager,
+ }
+
+ for k, v := range options.ListenAddrs {
+ require.Equal(t, expectedOptions.ListenAddrs[k], v)
+ }
+ require.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String())
+ require.Equal(t, expectedOptions.DataPath, options.DataPath)
+ require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub)
+ require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay)
+}
+
+func TestSubscribeToPeerConnectionEvents_SubscriptionError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ b := &bytes.Buffer{}
+
+ log.ApplyConfig(logging.Config{
+ Pipe: b,
+ })
+
+ n.Peer.host = &mockHost{n.Peer.host}
+
+ n.subscribeToPeerConnectionEvents()
+
+ logLines, err := parseLines(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(logLines) != 1 {
+ t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines))
+ }
+ require.Equal(t, "failed to subscribe to peer connectedness changed event: mock error", logLines[0]["msg"])
+
+ // reset logger
+ log = logging.MustNewLogger("defra.net")
+}
+
+func TestPeerConnectionEventEmitter_SingleEvent_NoError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+
+ err = emitter.Emit(event.EvtPeerConnectednessChanged{})
+ require.NoError(t, err)
+}
+
+func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+
+ // the emitter can take 20 events in the channel. This tests what happens when we go over the 20 events.
+ for i := 0; i < 21; i++ {
+ err = emitter.Emit(event.EvtPeerConnectednessChanged{})
+ require.NoError(t, err)
+ }
+}
+
+func TestSubscribeToPubSubEvents_SubscriptionError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ b := &bytes.Buffer{}
+
+ log.ApplyConfig(logging.Config{
+ Pipe: b,
+ })
+
+ n.Peer.host = &mockHost{n.Peer.host}
+
+ n.subscribeToPubSubEvents()
+
+ logLines, err := parseLines(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(logLines) != 1 {
+ t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines))
+ }
+ require.Equal(t, "failed to subscribe to pubsub event: mock error", logLines[0]["msg"])
+
+ // reset logger
+ log = logging.MustNewLogger("defra.net")
+}
+
+func TestPubSubEventEmitter_SingleEvent_NoError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ emitter, err := n.host.EventBus().Emitter(new(EvtPubSub))
+ require.NoError(t, err)
+
+ err = emitter.Emit(EvtPubSub{})
+ require.NoError(t, err)
+}
+
+func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ emitter, err := n.host.EventBus().Emitter(new(EvtPubSub))
+ require.NoError(t, err)
+
+ // the emitter can take 20 events in the channel. This tests what happens when we go over the 20 events.
+ for i := 0; i < 21; i++ {
+ err = emitter.Emit(EvtPubSub{})
+ require.NoError(t, err)
+ }
+}
+
+func TestSubscribeToPushLogEvents_SubscriptionError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ b := &bytes.Buffer{}
+
+ log.ApplyConfig(logging.Config{
+ Pipe: b,
+ })
+
+ n.Peer.host = &mockHost{n.Peer.host}
+
+ n.subscribeToPushLogEvents()
+
+ logLines, err := parseLines(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(logLines) != 1 {
+ t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines))
+ }
+ require.Equal(t, "failed to subscribe to push log event: mock error", logLines[0]["msg"])
+
+ // reset logger
+ log = logging.MustNewLogger("defra.net")
+}
+
+func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog))
+ require.NoError(t, err)
+
+ err = emitter.Emit(EvtReceivedPushLog{})
+ require.NoError(t, err)
+}
+
+func TestPushLogEventEmitter_MultiEvent_NoError(t *testing.T) {
+ db := FixtureNewMemoryDBWithBroadcaster(t)
+ n, err := NewNode(
+ context.Background(),
+ db,
+ // WithDataPath() is a required option with the current implementation of key management
+ WithDataPath(t.TempDir()),
+ )
+ require.NoError(t, err)
+
+ emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog))
+ require.NoError(t, err)
+
+ // the emitter can take 20 events in the channel. This tests what happens when we go over the 20 events.
+ for i := 0; i < 21; i++ { + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + } +} + +func TestWaitForPeerConnectionEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{ + Peer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPeerConnectionEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPeerConnectionEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{}) + require.NoError(t, err) + + err = n.WaitForPeerConnectionEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPeerConnectionWaitTimout) +} + +func TestWaitForPeerConnectionEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPeerConnectionEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPubSubEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{ + Peer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPubSubEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPubSubEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{}) + require.NoError(t, err) + + err = n.WaitForPubSubEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPubSubWaitTimeout) +} + +func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required 
option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPubSubEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{ + ByPeer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPushLogByPeerEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + err = n.WaitForPushLogByPeerEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPushLogWaitTimeout) +} + +func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPushLogByPeerEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{ + FromPeer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPushLogFromPeerEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + err = n.WaitForPushLogFromPeerEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPushLogWaitTimeout) 
+} + +func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPushLogFromPeerEvent(n.PeerID()) + require.NoError(t, err) +} diff --git a/net/pb/Makefile b/net/pb/Makefile index e96e192c5a..62eef77354 100644 --- a/net/pb/Makefile +++ b/net/pb/Makefile @@ -4,9 +4,12 @@ GO = $(PB:.proto=.pb.go) all: $(GO) %.pb.go: %.proto - protoc -I=. -I=$(GOPATH)/src -I=$(GOPATH)/src/github.com/gogo/protobuf/protobuf --gogofaster_out=\ - plugins=grpc:\ - . $< + protoc \ + --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ + --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ + --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ + --go-vtproto_opt=features=marshal+unmarshal+size \ + $< clean: rm -f *.pb.go diff --git a/net/pb/custom.go b/net/pb/custom.go deleted file mode 100644 index c71f585fd4..0000000000 --- a/net/pb/custom.go +++ /dev/null @@ -1,196 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package net_pb - -import ( - "encoding/json" - - "github.com/gogo/protobuf/proto" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-varint" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" -) - -// customGogoType aggregates the interfaces that custom Gogo types need to implement. -// it is only used for type assertions. -type customGogoType interface { - proto.Marshaler - proto.Unmarshaler - json.Marshaler - json.Unmarshaler - proto.Sizer - MarshalTo(data []byte) (n int, err error) -} - -// LibP2P object custom protobuf types - -// ProtoPeerID is a custom type used by gogo to serde raw peer IDs into the peer.ID type, and back. 
-type ProtoPeerID struct { - peer.ID -} - -var _ customGogoType = (*ProtoPeerID)(nil) - -func (id ProtoPeerID) Marshal() ([]byte, error) { - return []byte(id.ID), nil -} - -func (id ProtoPeerID) MarshalTo(data []byte) (n int, err error) { - return copy(data, id.ID), nil -} - -func (id ProtoPeerID) MarshalJSON() ([]byte, error) { - m, _ := id.Marshal() - return json.Marshal(m) -} - -func (id *ProtoPeerID) Unmarshal(data []byte) (err error) { - id.ID = peer.ID(string(data)) - return nil -} - -func (id *ProtoPeerID) UnmarshalJSON(data []byte) error { - var v []byte - err := json.Unmarshal(data, &v) - if err != nil { - return err - } - return id.Unmarshal(v) -} - -func (id ProtoPeerID) Size() int { - return len([]byte(id.ID)) -} - -// ProtoAddr is a custom type used by gogo to serde raw multiaddresses into -// the ma.Multiaddr type, and back. -type ProtoAddr struct { - ma.Multiaddr -} - -var _ customGogoType = (*ProtoAddr)(nil) - -func (a ProtoAddr) Marshal() ([]byte, error) { - return a.Bytes(), nil -} - -func (a ProtoAddr) MarshalTo(data []byte) (n int, err error) { - return copy(data, a.Bytes()), nil -} - -func (a ProtoAddr) MarshalJSON() ([]byte, error) { - m, _ := a.Marshal() - return json.Marshal(m) -} - -func (a *ProtoAddr) Unmarshal(data []byte) (err error) { - a.Multiaddr, err = ma.NewMultiaddrBytes(data) - return err -} - -func (a *ProtoAddr) UnmarshalJSON(data []byte) error { - v := new([]byte) - err := json.Unmarshal(data, v) - if err != nil { - return err - } - return a.Unmarshal(*v) -} - -func (a ProtoAddr) Size() int { - return len(a.Bytes()) -} - -// ProtoCid is a custom type used by gogo to serde raw CIDs into the cid.CID type, and back. -type ProtoCid struct { - cid.Cid -} - -var _ customGogoType = (*ProtoCid)(nil) - -func (c ProtoCid) Marshal() ([]byte, error) { - return c.Bytes(), nil -} - -func (c ProtoCid) MarshalTo(data []byte) (n int, err error) { - return copy(data, c.Bytes()), nil -} - -func (c ProtoCid) MarshalJSON() ([]byte, error) { - m, _ := c.Marshal() - return json.Marshal(m) -} - -func (c *ProtoCid) Unmarshal(data []byte) (err error) { - c.Cid, err = cid.Cast(data) - if errors.Is(err, varint.ErrUnderflow) { - c.Cid = cid.Undef - return nil - } - return err -} - -func (c *ProtoCid) UnmarshalJSON(data []byte) error { - v := new([]byte) - err := json.Unmarshal(data, v) - if err != nil { - return err - } - return c.Unmarshal(*v) -} - -func (c ProtoCid) Size() int { - return len(c.Bytes()) -} - -// ProtoCid is a custom type used by gogo to serde raw CIDs into the cid.CID type, and back. -type ProtoDocKey struct { - client.DocKey -} - -var _ customGogoType = (*ProtoDocKey)(nil) - -func (c ProtoDocKey) Marshal() ([]byte, error) { - return []byte(c.String()), nil -} - -func (c ProtoDocKey) MarshalTo(data []byte) (n int, err error) { - return copy(data, []byte(c.String())), nil -} - -func (c ProtoDocKey) MarshalJSON() ([]byte, error) { - m, _ := c.Marshal() - return json.Marshal(m) -} - -func (c *ProtoDocKey) Unmarshal(data []byte) (err error) { - c.DocKey, err = client.NewDocKeyFromString(string(data)) - return err -} - -func (c *ProtoDocKey) UnmarshalJSON(data []byte) error { - v := new([]byte) - err := json.Unmarshal(data, v) - if err != nil { - return err - } - return c.Unmarshal(*v) -} - -func (c ProtoDocKey) Size() int { - return len([]byte(c.String())) -} diff --git a/net/pb/net.pb.go b/net/pb/net.pb.go index 14ba3650b5..70daae73a7 100644 --- a/net/pb/net.pb.go +++ b/net/pb/net.pb.go @@ -1,2445 +1,1922 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.9 // source: net.proto package net_pb import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Log represents a thread log. type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID of the document. - DocKey *ProtoDocKey `protobuf:"bytes,1,opt,name=docKey,proto3,customtype=ProtoDocKey" json:"docKey,omitempty"` + DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` // head of the log. - Head *ProtoCid `protobuf:"bytes,4,opt,name=head,proto3,customtype=ProtoCid" json:"head,omitempty"` + Head []byte `protobuf:"bytes,4,opt,name=head,proto3" json:"head,omitempty"` } -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{0} -} -func (m *Document) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return m.Size() -} -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} -var xxx_messageInfo_Document proto.InternalMessageInfo - -// Record is a thread record containing link data. -type Document_Log struct { - // block is the top-level node's raw data as an ipld.Block. 
- Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Document_Log) Reset() { *m = Document_Log{} } -func (m *Document_Log) String() string { return proto.CompactTextString(m) } -func (*Document_Log) ProtoMessage() {} -func (*Document_Log) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{0, 0} -} -func (m *Document_Log) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Document_Log) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Document_Log.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *Document_Log) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document_Log.Merge(m, src) -} -func (m *Document_Log) XXX_Size() int { - return m.Size() -} -func (m *Document_Log) XXX_DiscardUnknown() { - xxx_messageInfo_Document_Log.DiscardUnknown(m) + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. +func (*Document) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Document_Log proto.InternalMessageInfo +func (x *Document) GetDocKey() []byte { + if x != nil { + return x.DocKey + } + return nil +} -func (m *Document_Log) GetBlock() []byte { - if m != nil { - return m.Block +func (x *Document) GetHead() []byte { + if x != nil { + return x.Head } return nil } type GetDocGraphRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetDocGraphRequest) Reset() { *m = GetDocGraphRequest{} } -func (m *GetDocGraphRequest) String() string { return proto.CompactTextString(m) } -func (*GetDocGraphRequest) ProtoMessage() {} -func (*GetDocGraphRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{1} -} -func (m *GetDocGraphRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetDocGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetDocGraphRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetDocGraphRequest) Reset() { + *x = GetDocGraphRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetDocGraphRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocGraphRequest.Merge(m, src) -} -func (m *GetDocGraphRequest) XXX_Size() int { - return m.Size() -} -func (m *GetDocGraphRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocGraphRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDocGraphRequest proto.InternalMessageInfo -type GetDocGraphReply struct { +func (x *GetDocGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetDocGraphReply) Reset() { *m = GetDocGraphReply{} } -func (m *GetDocGraphReply) String() 
string { return proto.CompactTextString(m) } -func (*GetDocGraphReply) ProtoMessage() {} -func (*GetDocGraphReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{2} -} -func (m *GetDocGraphReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetDocGraphReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetDocGraphReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetDocGraphRequest) ProtoMessage() {} + +func (x *GetDocGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetDocGraphReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocGraphReply.Merge(m, src) -} -func (m *GetDocGraphReply) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetDocGraphRequest.ProtoReflect.Descriptor instead. +func (*GetDocGraphRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{1} } -func (m *GetDocGraphReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocGraphReply.DiscardUnknown(m) + +type GetDocGraphReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetDocGraphReply proto.InternalMessageInfo +func (x *GetDocGraphReply) Reset() { + *x = GetDocGraphReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushDocGraphRequest struct { +func (x *GetDocGraphReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphRequest) Reset() { *m = PushDocGraphRequest{} } -func (m *PushDocGraphRequest) String() string { return proto.CompactTextString(m) } -func (*PushDocGraphRequest) ProtoMessage() {} -func (*PushDocGraphRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{3} -} -func (m *PushDocGraphRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushDocGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushDocGraphRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetDocGraphReply) ProtoMessage() {} + +func (x *GetDocGraphReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *PushDocGraphRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushDocGraphRequest.Merge(m, src) -} -func (m *PushDocGraphRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetDocGraphReply.ProtoReflect.Descriptor instead. 
+func (*GetDocGraphReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{2} } -func (m *PushDocGraphRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PushDocGraphRequest.DiscardUnknown(m) + +type PushDocGraphRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_PushDocGraphRequest proto.InternalMessageInfo +func (x *PushDocGraphRequest) Reset() { + *x = PushDocGraphRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushDocGraphReply struct { +func (x *PushDocGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphReply) Reset() { *m = PushDocGraphReply{} } -func (m *PushDocGraphReply) String() string { return proto.CompactTextString(m) } -func (*PushDocGraphReply) ProtoMessage() {} -func (*PushDocGraphReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{4} -} -func (m *PushDocGraphReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushDocGraphReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushDocGraphReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushDocGraphRequest) ProtoMessage() {} + +func (x *PushDocGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *PushDocGraphReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushDocGraphReply.Merge(m, src) -} -func (m *PushDocGraphReply) XXX_Size() int { - return m.Size() + +// Deprecated: Use PushDocGraphRequest.ProtoReflect.Descriptor instead. 
+func (*PushDocGraphRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{3} } -func (m *PushDocGraphReply) XXX_DiscardUnknown() { - xxx_messageInfo_PushDocGraphReply.DiscardUnknown(m) + +type PushDocGraphReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_PushDocGraphReply proto.InternalMessageInfo +func (x *PushDocGraphReply) Reset() { + *x = PushDocGraphReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type GetLogRequest struct { +func (x *PushDocGraphReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetLogRequest) Reset() { *m = GetLogRequest{} } -func (m *GetLogRequest) String() string { return proto.CompactTextString(m) } -func (*GetLogRequest) ProtoMessage() {} -func (*GetLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{5} -} -func (m *GetLogRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLogRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushDocGraphReply) ProtoMessage() {} + +func (x *PushDocGraphReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetLogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLogRequest.Merge(m, src) -} -func (m *GetLogRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use PushDocGraphReply.ProtoReflect.Descriptor instead. 
+func (*PushDocGraphReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{4} } -func (m *GetLogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetLogRequest.DiscardUnknown(m) + +type GetLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetLogRequest proto.InternalMessageInfo +func (x *GetLogRequest) Reset() { + *x = GetLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type GetLogReply struct { +func (x *GetLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetLogReply) Reset() { *m = GetLogReply{} } -func (m *GetLogReply) String() string { return proto.CompactTextString(m) } -func (*GetLogReply) ProtoMessage() {} -func (*GetLogReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{6} -} -func (m *GetLogReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLogReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLogReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetLogRequest) ProtoMessage() {} + +func (x *GetLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetLogReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLogReply.Merge(m, src) -} -func (m *GetLogReply) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetLogRequest.ProtoReflect.Descriptor instead. 
+func (*GetLogRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{5} } -func (m *GetLogReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetLogReply.DiscardUnknown(m) + +type GetLogReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetLogReply proto.InternalMessageInfo +func (x *GetLogReply) Reset() { + *x = GetLogReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushLogRequest struct { - Body *PushLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` +func (x *GetLogReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogRequest) Reset() { *m = PushLogRequest{} } -func (m *PushLogRequest) String() string { return proto.CompactTextString(m) } -func (*PushLogRequest) ProtoMessage() {} -func (*PushLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{7} -} -func (m *PushLogRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushLogRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetLogReply) ProtoMessage() {} + +func (x *GetLogReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *PushLogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushLogRequest.Merge(m, src) -} -func (m *PushLogRequest) XXX_Size() int { - return m.Size() -} -func (m *PushLogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PushLogRequest.DiscardUnknown(m) + +// Deprecated: Use GetLogReply.ProtoReflect.Descriptor instead. +func (*GetLogReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{6} } -var xxx_messageInfo_PushLogRequest proto.InternalMessageInfo +type PushLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Body *PushLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` +} -func (m *PushLogRequest) GetBody() *PushLogRequest_Body { - if m != nil { - return m.Body +func (x *PushLogRequest) Reset() { + *x = PushLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type PushLogRequest_Body struct { - // docKey is the target DocKey. - DocKey *ProtoDocKey `protobuf:"bytes,1,opt,name=docKey,proto3,customtype=ProtoDocKey" json:"docKey,omitempty"` - // cid is the target CID. - Cid *ProtoCid `protobuf:"bytes,2,opt,name=cid,proto3,customtype=ProtoCid" json:"cid,omitempty"` - // - SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` - // from is the peer ID of the peer that created the log - Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` - // record is the actual record payload. 
- Log *Document_Log `protobuf:"bytes,5,opt,name=log,proto3" json:"log,omitempty"` +func (x *PushLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogRequest_Body) Reset() { *m = PushLogRequest_Body{} } -func (m *PushLogRequest_Body) String() string { return proto.CompactTextString(m) } -func (*PushLogRequest_Body) ProtoMessage() {} -func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{7, 0} -} -func (m *PushLogRequest_Body) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushLogRequest_Body) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushLogRequest_Body.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushLogRequest) ProtoMessage() {} + +func (x *PushLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *PushLogRequest_Body) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushLogRequest_Body.Merge(m, src) -} -func (m *PushLogRequest_Body) XXX_Size() int { - return m.Size() -} -func (m *PushLogRequest_Body) XXX_DiscardUnknown() { - xxx_messageInfo_PushLogRequest_Body.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_PushLogRequest_Body proto.InternalMessageInfo +// Deprecated: Use PushLogRequest.ProtoReflect.Descriptor instead. +func (*PushLogRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{7} +} -func (m *PushLogRequest_Body) GetSchemaID() []byte { - if m != nil { - return m.SchemaID +func (x *PushLogRequest) GetBody() *PushLogRequest_Body { + if x != nil { + return x.Body } return nil } -func (m *PushLogRequest_Body) GetCreator() string { - if m != nil { - return m.Creator - } - return "" +type GetHeadLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *PushLogRequest_Body) GetLog() *Document_Log { - if m != nil { - return m.Log +func (x *GetHeadLogRequest) Reset() { + *x = GetHeadLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetHeadLogRequest struct { +func (x *GetHeadLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogRequest) Reset() { *m = GetHeadLogRequest{} } -func (m *GetHeadLogRequest) String() string { return proto.CompactTextString(m) } -func (*GetHeadLogRequest) ProtoMessage() {} -func (*GetHeadLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{8} -} -func (m *GetHeadLogRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetHeadLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetHeadLogRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetHeadLogRequest) ProtoMessage() {} + +func (x *GetHeadLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetHeadLogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHeadLogRequest.Merge(m, src) -} -func (m *GetHeadLogRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetHeadLogRequest.ProtoReflect.Descriptor instead. +func (*GetHeadLogRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{8} } -func (m *GetHeadLogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHeadLogRequest.DiscardUnknown(m) + +type PushLogReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetHeadLogRequest proto.InternalMessageInfo +func (x *PushLogReply) Reset() { + *x = PushLogReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushLogReply struct { +func (x *PushLogReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogReply) Reset() { *m = PushLogReply{} } -func (m *PushLogReply) String() string { return proto.CompactTextString(m) } -func (*PushLogReply) ProtoMessage() {} -func (*PushLogReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{9} -} -func (m *PushLogReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushLogReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushLogReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushLogReply) ProtoMessage() {} + +func (x *PushLogReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use PushLogReply.ProtoReflect.Descriptor instead. 
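The pattern above repeats for every message: the regenerated code drops gogo-protobuf's hand-written `Marshal`/`Unmarshal`/`XXX_*` plumbing in favor of the `protoimpl` runtime, so callers serialize through `google.golang.org/protobuf/proto` instead of calling methods on the message. A minimal sketch of the caller-side change, assuming the generated package lives at the import path shown (field values are placeholders):

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	pb "github.com/sourcenetwork/defradb/net/pb" // assumed generated-package path
)

func main() {
	// Field values are illustrative placeholders.
	doc := &pb.Document{DocKey: []byte("bae-123"), Head: []byte{0x01, 0x02}}

	raw, err := proto.Marshal(doc) // replaces gogo's doc.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	var out pb.Document
	if err := proto.Unmarshal(raw, &out); err != nil { // replaces gogo's out.Unmarshal(raw)
		log.Fatal(err)
	}

	// Generated getters remain nil-safe, as with the old gogo accessors.
	fmt.Printf("docKey=%s head=%x\n", out.GetDocKey(), out.GetHead())
}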
+func (*PushLogReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{9} } -func (m *PushLogReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushLogReply.Merge(m, src) + +type GetHeadLogReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *PushLogReply) XXX_Size() int { - return m.Size() + +func (x *GetHeadLogReply) Reset() { + *x = GetHeadLogReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PushLogReply) XXX_DiscardUnknown() { - xxx_messageInfo_PushLogReply.DiscardUnknown(m) + +func (x *GetHeadLogReply) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PushLogReply proto.InternalMessageInfo +func (*GetHeadLogReply) ProtoMessage() {} -type GetHeadLogReply struct { +func (x *GetHeadLogReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetHeadLogReply) Reset() { *m = GetHeadLogReply{} } -func (m *GetHeadLogReply) String() string { return proto.CompactTextString(m) } -func (*GetHeadLogReply) ProtoMessage() {} +// Deprecated: Use GetHeadLogReply.ProtoReflect.Descriptor instead. func (*GetHeadLogReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{10} -} -func (m *GetHeadLogReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetHeadLogReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetHeadLogReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetHeadLogReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHeadLogReply.Merge(m, src) -} -func (m *GetHeadLogReply) XXX_Size() int { - return m.Size() -} -func (m *GetHeadLogReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetHeadLogReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHeadLogReply proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Document)(nil), "net.pb.Document") - proto.RegisterType((*Document_Log)(nil), "net.pb.Document.Log") - proto.RegisterType((*GetDocGraphRequest)(nil), "net.pb.GetDocGraphRequest") - proto.RegisterType((*GetDocGraphReply)(nil), "net.pb.GetDocGraphReply") - proto.RegisterType((*PushDocGraphRequest)(nil), "net.pb.PushDocGraphRequest") - proto.RegisterType((*PushDocGraphReply)(nil), "net.pb.PushDocGraphReply") - proto.RegisterType((*GetLogRequest)(nil), "net.pb.GetLogRequest") - proto.RegisterType((*GetLogReply)(nil), "net.pb.GetLogReply") - proto.RegisterType((*PushLogRequest)(nil), "net.pb.PushLogRequest") - proto.RegisterType((*PushLogRequest_Body)(nil), "net.pb.PushLogRequest.Body") - proto.RegisterType((*GetHeadLogRequest)(nil), "net.pb.GetHeadLogRequest") - proto.RegisterType((*PushLogReply)(nil), "net.pb.PushLogReply") - proto.RegisterType((*GetHeadLogReply)(nil), "net.pb.GetHeadLogReply") -} - -func init() { proto.RegisterFile("net.proto", fileDescriptor_a5b10ce944527a32) } - -var fileDescriptor_a5b10ce944527a32 = []byte{ - // 480 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 
0x93, 0xc1, 0x6e, 0xda, 0x40, - 0x10, 0x86, 0x71, 0x20, 0x40, 0x06, 0x12, 0x9a, 0x81, 0xb4, 0xce, 0x46, 0x72, 0x22, 0x0e, 0x6d, - 0x2f, 0x35, 0x52, 0x2a, 0x55, 0xea, 0x95, 0x52, 0x91, 0xaa, 0x39, 0x44, 0xee, 0x13, 0xd8, 0xeb, - 0xad, 0x8d, 0x0a, 0x59, 0x6a, 0xd6, 0x95, 0x78, 0x8b, 0xbe, 0x48, 0xdf, 0xa3, 0xc7, 0xf4, 0x56, - 0xe5, 0x10, 0x55, 0xf0, 0x04, 0x7d, 0x83, 0x6a, 0x67, 0x63, 0xc0, 0xe0, 0x43, 0x6f, 0x9e, 0xf9, - 0xff, 0x99, 0xd9, 0xf9, 0x46, 0x86, 0x83, 0x5b, 0xa1, 0xdc, 0x69, 0x22, 0x95, 0xc4, 0x2a, 0x7d, - 0x06, 0xec, 0x55, 0x34, 0x52, 0x71, 0x1a, 0xb8, 0x5c, 0x4e, 0x7a, 0x91, 0x8c, 0x64, 0x8f, 0xe4, - 0x20, 0xfd, 0x4c, 0x11, 0x05, 0xf4, 0x65, 0xca, 0xba, 0x09, 0xd4, 0x07, 0x92, 0xa7, 0x13, 0x71, - 0xab, 0xf0, 0x05, 0x54, 0x43, 0xc9, 0x3f, 0x8a, 0xb9, 0x6d, 0x5d, 0x58, 0x2f, 0x9b, 0xfd, 0xd6, - 0xfd, 0xc3, 0x79, 0xe3, 0x46, 0xdb, 0x06, 0x94, 0xf6, 0x1e, 0x65, 0xbc, 0x80, 0x4a, 0x2c, 0xfc, - 0xd0, 0xae, 0x90, 0xad, 0x79, 0xff, 0x70, 0x5e, 0x27, 0xdb, 0xbb, 0x51, 0xe8, 0x91, 0xc2, 0xce, - 0xa0, 0x7c, 0x2d, 0x23, 0xec, 0xc0, 0x7e, 0x30, 0x96, 0xfc, 0x8b, 0x69, 0xe8, 0x99, 0xa0, 0xdb, - 0x01, 0x1c, 0x0a, 0x35, 0x90, 0x7c, 0x98, 0xf8, 0xd3, 0xd8, 0x13, 0x5f, 0x53, 0x31, 0x53, 0x5d, - 0x84, 0x27, 0xb9, 0xec, 0x74, 0x3c, 0xef, 0x9e, 0x40, 0xfb, 0x26, 0x9d, 0xc5, 0xdb, 0xd6, 0x36, - 0x1c, 0xe7, 0xd3, 0xda, 0xdb, 0x82, 0xc3, 0xa1, 0x50, 0xd7, 0x32, 0xca, 0x5c, 0x87, 0xd0, 0xc8, - 0x12, 0x5a, 0xff, 0x6b, 0xc1, 0x91, 0xae, 0x5a, 0x3b, 0xb0, 0x07, 0x95, 0x40, 0x86, 0x66, 0xdd, - 0xc6, 0xe5, 0x99, 0x6b, 0x10, 0xba, 0x79, 0x97, 0xdb, 0x97, 0xe1, 0xdc, 0x23, 0x23, 0xfb, 0x61, - 0x41, 0x45, 0x87, 0xff, 0x8f, 0xca, 0x81, 0x32, 0x1f, 0x85, 0xf6, 0x5e, 0x01, 0x29, 0x2d, 0x20, - 0x83, 0xfa, 0x8c, 0xc7, 0x62, 0xe2, 0x7f, 0x18, 0xd8, 0x65, 0x82, 0xb4, 0x8a, 0xd1, 0x86, 0x1a, - 0x4f, 0x84, 0xaf, 0x64, 0x42, 0xa4, 0x0f, 0xbc, 0x2c, 0xc4, 0xe7, 0x50, 0x1e, 0xcb, 0xc8, 0xde, - 0xa7, 0x77, 0x77, 0xb2, 0x77, 0x67, 0x87, 0x74, 0xf5, 0xe3, 0xb5, 0x41, 0x83, 0x1a, 0x0a, 0x75, - 0x25, 0xfc, 0x70, 0x83, 0xcb, 0x11, 0x34, 0x57, 0x1b, 0x6a, 0x30, 0xc7, 0xd0, 0xda, 0x34, 0x4d, - 0xc7, 0xf3, 0xcb, 0x5f, 0x7b, 0x50, 0xfb, 0x24, 0x92, 0x6f, 0x23, 0x2e, 0xf0, 0x3d, 0x61, 0xcc, - 0x58, 0x23, 0xcb, 0xa6, 0xed, 0x9e, 0x90, 0xd9, 0x85, 0x9a, 0x9e, 0x51, 0xc2, 0x2b, 0x33, 0x75, - 0xd5, 0x27, 0x47, 0x7b, 0xbb, 0xd1, 0x69, 0xb1, 0x68, 0x3a, 0xbd, 0x81, 0xaa, 0xb9, 0x2b, 0x9e, - 0x6c, 0xcc, 0x5b, 0x2f, 0xc8, 0xda, 0xdb, 0x69, 0x53, 0xf7, 0x16, 0x6a, 0x8f, 0x7b, 0xe3, 0xd3, - 0xe2, 0x53, 0xb3, 0xce, 0x4e, 0xde, 0x94, 0xf6, 0x01, 0xd6, 0x88, 0xf0, 0x74, 0xa3, 0x7f, 0x9e, - 0x2d, 0x7b, 0x56, 0x24, 0x51, 0x8f, 0xbe, 0xfd, 0x73, 0xe1, 0x58, 0x77, 0x0b, 0xc7, 0xfa, 0xb3, - 0x70, 0xac, 0xef, 0x4b, 0xa7, 0x74, 0xb7, 0x74, 0x4a, 0xbf, 0x97, 0x4e, 0x29, 0xa8, 0xd2, 0xaf, - 0xf8, 0xfa, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x46, 0x27, 0x1c, 0xce, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn + return file_net_proto_rawDescGZIP(), []int{10} +} -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +type SetReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// ServiceClient is the client API for Service service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ServiceClient interface { - // GetDocGraph from this peer. - GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) - // PushDocGraph to this peer. - PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) - // GetLog from this peer. - GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) - // PushLog to this peer. - PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) - // GetHeadLog from this peer - GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` + Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` } -type serviceClient struct { - cc *grpc.ClientConn +func (x *SetReplicatorRequest) Reset() { + *x = SetReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func NewServiceClient(cc *grpc.ClientConn) ServiceClient { - return &serviceClient{cc} +func (x *SetReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *serviceClient) GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) { - out := new(GetDocGraphReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/GetDocGraph", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*SetReplicatorRequest) ProtoMessage() {} -func (c *serviceClient) PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) { - out := new(PushDocGraphReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/PushDocGraph", in, out, opts...) - if err != nil { - return nil, err +func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *serviceClient) GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) { - out := new(GetLogReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/GetLog", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. +func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{11} } -func (c *serviceClient) PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) { - out := new(PushLogReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/PushLog", in, out, opts...) 
- if err != nil { - return nil, err +func (x *SetReplicatorRequest) GetCollections() []string { + if x != nil { + return x.Collections } - return out, nil + return nil } -func (c *serviceClient) GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) { - out := new(GetHeadLogReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/GetHeadLog", in, out, opts...) - if err != nil { - return nil, err +func (x *SetReplicatorRequest) GetAddr() []byte { + if x != nil { + return x.Addr } - return out, nil + return nil } -// ServiceServer is the server API for Service service. -type ServiceServer interface { - // GetDocGraph from this peer. - GetDocGraph(context.Context, *GetDocGraphRequest) (*GetDocGraphReply, error) - // PushDocGraph to this peer. - PushDocGraph(context.Context, *PushDocGraphRequest) (*PushDocGraphReply, error) - // GetLog from this peer. - GetLog(context.Context, *GetLogRequest) (*GetLogReply, error) - // PushLog to this peer. - PushLog(context.Context, *PushLogRequest) (*PushLogReply, error) - // GetHeadLog from this peer - GetHeadLog(context.Context, *GetHeadLogRequest) (*GetHeadLogReply, error) -} +type SetReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// UnimplementedServiceServer can be embedded to have forward compatible implementations. -type UnimplementedServiceServer struct { + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (*UnimplementedServiceServer) GetDocGraph(ctx context.Context, req *GetDocGraphRequest) (*GetDocGraphReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetDocGraph not implemented") +func (x *SetReplicatorReply) Reset() { + *x = SetReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedServiceServer) PushDocGraph(ctx context.Context, req *PushDocGraphRequest) (*PushDocGraphReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method PushDocGraph not implemented") + +func (x *SetReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedServiceServer) GetLog(ctx context.Context, req *GetLogRequest) (*GetLogReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetLog not implemented") + +func (*SetReplicatorReply) ProtoMessage() {} + +func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (*UnimplementedServiceServer) PushLog(ctx context.Context, req *PushLogRequest) (*PushLogReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method PushLog not implemented") + +// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. 
+func (*SetReplicatorReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{12} } -func (*UnimplementedServiceServer) GetHeadLog(ctx context.Context, req *GetHeadLogRequest) (*GetHeadLogReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHeadLog not implemented") + +func (x *SetReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil } -func RegisterServiceServer(s *grpc.Server, srv ServiceServer) { - s.RegisterService(&_Service_serviceDesc, srv) +type DeleteReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` + Collections []string `protobuf:"bytes,2,rep,name=collections,proto3" json:"collections,omitempty"` } -func _Service_GetDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocGraphRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetDocGraph(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/GetDocGraph", +func (x *DeleteReplicatorRequest) Reset() { + *x = DeleteReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetDocGraph(ctx, req.(*GetDocGraphRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Service_PushDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PushDocGraphRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).PushDocGraph(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/PushDocGraph", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).PushDocGraph(ctx, req.(*PushDocGraphRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *DeleteReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Service_GetLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetLogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetLog(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/GetLog", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetLog(ctx, req.(*GetLogRequest)) +func (*DeleteReplicatorRequest) ProtoMessage() {} + +func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Service_PushLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PushLogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).PushLog(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/PushLog", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).PushLog(ctx, req.(*PushLogRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. +func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{13} } -func _Service_GetHeadLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHeadLogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetHeadLog(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/GetHeadLog", +func (x *DeleteReplicatorRequest) GetPeerID() []byte { + if x != nil { + return x.PeerID } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetHeadLog(ctx, req.(*GetHeadLogRequest)) + return nil +} + +func (x *DeleteReplicatorRequest) GetCollections() []string { + if x != nil { + return x.Collections } - return interceptor(ctx, in, info, handler) + return nil } -var _Service_serviceDesc = grpc.ServiceDesc{ - ServiceName: "net.pb.Service", - HandlerType: (*ServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetDocGraph", - Handler: _Service_GetDocGraph_Handler, - }, - { - MethodName: "PushDocGraph", - Handler: _Service_PushDocGraph_Handler, - }, - { - MethodName: "GetLog", - Handler: _Service_GetLog_Handler, - }, - { - MethodName: "PushLog", - Handler: _Service_PushLog_Handler, - }, - { - MethodName: "GetHeadLog", - Handler: _Service_GetHeadLog_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "net.proto", +type DeleteReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *Document) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DeleteReplicatorReply) Reset() { + *x = DeleteReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *Document) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *DeleteReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Document) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Head != nil { - { - size := m.Head.Size() - i -= size - if _, err := m.Head.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.DocKey != nil { - { - size := m.DocKey.Size() - i -= size - if _, err := m.DocKey.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) +func (*DeleteReplicatorReply) 
ProtoMessage() {} + +func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0xa + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *Document_Log) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. +func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{14} } -func (m *Document_Log) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *DeleteReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil } -func (m *Document_Log) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Block) > 0 { - i -= len(m.Block) - copy(dAtA[i:], m.Block) - i = encodeVarintNet(dAtA, i, uint64(len(m.Block))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +type GetAllReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetDocGraphRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorRequest) Reset() { + *x = GetAllReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetDocGraphRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetDocGraphRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*GetAllReplicatorRequest) ProtoMessage() {} -func (m *GetDocGraphReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *GetDocGraphReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. 
+func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{15} } -func (m *GetDocGraphReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type GetAllReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` } -func (m *PushDocGraphRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply) Reset() { + *x = GetAllReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PushDocGraphRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*GetAllReplicatorReply) ProtoMessage() {} -func (m *PushDocGraphReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. 
+func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{16} } -func (m *PushDocGraphReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { + if x != nil { + return x.Replicators + } + return nil } -func (m *PushDocGraphReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type AddP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *GetLogRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsRequest) Reset() { + *x = AddP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetLogRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *AddP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*AddP2PCollectionsRequest) ProtoMessage() {} -func (m *GetLogReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) +} + +// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{17} } -func (m *GetLogReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *AddP2PCollectionsRequest) GetCollections() []string { + if x != nil { + return x.Collections + } + return nil } -func (m *GetLogReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type AddP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` } -func (m *PushLogRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsReply) Reset() { + *x = AddP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PushLogRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *AddP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Body != nil { - { - size, err := m.Body.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNet(dAtA, i, uint64(size)) +func (*AddP2PCollectionsReply) ProtoMessage() {} + +func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0xa + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) +} + +// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. 
+func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{18} } -func (m *PushLogRequest_Body) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsReply) GetErr() string { + if x != nil { + return x.Err } - return dAtA[:n], nil + return "" } -func (m *PushLogRequest_Body) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type RemoveP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *PushLogRequest_Body) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Log != nil { - { - size, err := m.Log.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintNet(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0x22 - } - if len(m.SchemaID) > 0 { - i -= len(m.SchemaID) - copy(dAtA[i:], m.SchemaID) - i = encodeVarintNet(dAtA, i, uint64(len(m.SchemaID))) - i-- - dAtA[i] = 0x1a - } - if m.Cid != nil { - { - size := m.Cid.Size() - i -= size - if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.DocKey != nil { - { - size := m.DocKey.Size() - i -= size - if _, err := m.DocKey.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *RemoveP2PCollectionsRequest) Reset() { + *x = RemoveP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *GetHeadLogRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *RemoveP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (*RemoveP2PCollectionsRequest) ProtoMessage() {} + +func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetHeadLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{19} } -func (m *PushLogReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *RemoveP2PCollectionsRequest) GetCollections() []string { + if x != nil { + return x.Collections } - return dAtA[:n], nil + return nil } -func (m *PushLogReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type RemoveP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PushLogReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` } -func (m *GetHeadLogReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *RemoveP2PCollectionsReply) Reset() { + *x = RemoveP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetHeadLogReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *RemoveP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*RemoveP2PCollectionsReply) ProtoMessage() {} -func encodeVarintNet(dAtA []byte, offset int, v uint64) int { - offset -= sovNet(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - dAtA[offset] = uint8(v) - return base + return mi.MessageOf(x) } -func (m *Document) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DocKey != nil { - l = m.DocKey.Size() - n += 1 + l + sovNet(uint64(l)) - } - if m.Head != nil { - l = m.Head.Size() - n += 1 + l + sovNet(uint64(l)) - } - return n + +// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. 
+func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{20} } -func (m *Document_Log) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Block) - if l > 0 { - n += 1 + l + sovNet(uint64(l)) +func (x *RemoveP2PCollectionsReply) GetErr() string { + if x != nil { + return x.Err } - return n + return "" } -func (m *GetDocGraphRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +type GetAllP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetDocGraphReply) Size() (n int) { - if m == nil { - return 0 +func (x *GetAllP2PCollectionsRequest) Reset() { + *x = GetAllP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - return n } -func (m *PushDocGraphRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func (x *GetAllP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphReply) Size() (n int) { - if m == nil { - return 0 +func (*GetAllP2PCollectionsRequest) ProtoMessage() {} + +func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - var l int - _ = l - return n + return mi.MessageOf(x) } -func (m *GetLogRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
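Beyond the regeneration itself, the diff introduces new request/reply pairs for the replicator and P2P-collection RPCs (`SetReplicatorRequest`, `DeleteReplicatorRequest`, `AddP2PCollectionsRequest`, `RemoveP2PCollectionsRequest`, and their replies). A hedged sketch of populating them, reusing the assumed `pb` import from the earlier example; the collection names, multiaddr, and peer ID are placeholders, and the Add/Remove replies appear to report failures in-band via their `Err` field (`GetErr`):

// buildReplicatorCalls is a hypothetical helper that only demonstrates field usage.
func buildReplicatorCalls(peerID []byte) []proto.Message {
	set := &pb.SetReplicatorRequest{
		Collections: []string{"Users", "Posts"},                        // collections to replicate
		Addr:        []byte("/ip4/127.0.0.1/tcp/9171/p2p/12D3KooW..."), // target peer multiaddr (placeholder)
	}
	del := &pb.DeleteReplicatorRequest{
		PeerID:      peerID, // typically the PeerID echoed back in SetReplicatorReply
		Collections: []string{"Posts"},
	}
	add := &pb.AddP2PCollectionsRequest{
		Collections: []string{"Users"}, // collection IDs to serve over the P2P layer
	}
	rem := &pb.RemoveP2PCollectionsRequest{
		Collections: []string{"Users"},
	}
	return []proto.Message{set, del, add, rem}
}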
+func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{21} } -func (m *GetLogReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +type GetAllP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *PushLogRequest) Size() (n int) { - if m == nil { - return 0 +func (x *GetAllP2PCollectionsReply) Reset() { + *x = GetAllP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - if m.Body != nil { - l = m.Body.Size() - n += 1 + l + sovNet(uint64(l)) - } - return n } -func (m *PushLogRequest_Body) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DocKey != nil { - l = m.DocKey.Size() - n += 1 + l + sovNet(uint64(l)) - } - if m.Cid != nil { - l = m.Cid.Size() - n += 1 + l + sovNet(uint64(l)) - } - l = len(m.SchemaID) - if l > 0 { - n += 1 + l + sovNet(uint64(l)) - } - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovNet(uint64(l)) - } - if m.Log != nil { - l = m.Log.Size() - n += 1 + l + sovNet(uint64(l)) - } - return n +func (x *GetAllP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogRequest) Size() (n int) { - if m == nil { - return 0 +func (*GetAllP2PCollectionsReply) ProtoMessage() {} + +func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - var l int - _ = l - return n + return mi.MessageOf(x) } -func (m *PushLogReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead. +func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{22} } -func (m *GetHeadLogReply) Size() (n int) { - if m == nil { - return 0 +func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { + if x != nil { + return x.Collections } - var l int - _ = l - return n + return nil } -func sovNet(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +// Record is a thread record containing link data. +type Document_Log struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // block is the top-level node's raw data as an ipld.Block. 
+ Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` } -func sozNet(x uint64) (n int) { - return sovNet(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (x *Document_Log) Reset() { + *x = Document_Log{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Document) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Document: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoDocKey - m.DocKey = &v - if err := m.DocKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoCid - m.Head = &v - if err := m.Head.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *Document_Log) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document_Log) ProtoMessage() {} + +func (x *Document_Log) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} - if iNdEx > l { - return io.ErrUnexpectedEOF +// Deprecated: Use Document_Log.ProtoReflect.Descriptor instead. 
+func (*Document_Log) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Document_Log) GetBlock() []byte { + if x != nil { + return x.Block } return nil } -func (m *Document_Log) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Log: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) - if m.Block == nil { - m.Block = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type PushLogRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // docKey is the DocKey of the document that is affected by the log. + DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` + // cid is the CID of the composite of the document. + Cid []byte `protobuf:"bytes,2,opt,name=cid,proto3" json:"cid,omitempty"` + // schemaID is the SchemaID of the collection that the document resides in. + SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` + // creator is the PeerID of the peer that created the log. + Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` + // log hold the block that represent version of the document. 
+ Log *Document_Log `protobuf:"bytes,6,opt,name=log,proto3" json:"log,omitempty"` +} + +func (x *PushLogRequest_Body) Reset() { + *x = PushLogRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *GetDocGraphRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *PushLogRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushLogRequest_Body) ProtoMessage() {} + +func (x *PushLogRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use PushLogRequest_Body.ProtoReflect.Descriptor instead. 
+func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{7, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetDocKey() []byte { + if x != nil { + return x.DocKey } return nil } -func (m *GetDocGraphReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetCid() []byte { + if x != nil { + return x.Cid } return nil } -func (m *PushDocGraphRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetSchemaID() []byte { + if x != nil { + return x.SchemaID } return nil } -func (m *PushDocGraphReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + +func (x *PushLogRequest_Body) GetCreator() string { + if x != nil { + return x.Creator } + return "" +} - if iNdEx > l { - return 
io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetLog() *Document_Log { + if x != nil { + return x.Log } return nil } -func (m *GetLogRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type GetAllReplicatorReply_Replicators struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` + Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` +} + +func (x *GetAllReplicatorReply_Replicators) Reset() { + *x = GetAllReplicatorReply_Replicators{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *GetLogReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *GetAllReplicatorReply_Replicators) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} + +func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. 
+func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{16, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { + if x != nil { + return x.Info } return nil } -func (m *PushLogRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Body == nil { - m.Body = &PushLogRequest_Body{} - } - if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { + if x != nil { + return x.Schemas } return nil } -func (m *PushLogRequest_Body) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Body: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Body: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoDocKey - m.DocKey = &v - if err := m.DocKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoCid - m.Cid = &v - if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) - if m.SchemaID == nil { - m.SchemaID = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Log == nil { - m.Log = &Document_Log{} - } - if err := m.Log.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type GetAllReplicatorReply_Replicators_Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` +} + +func (x *GetAllReplicatorReply_Replicators_Info) Reset() { + *x = GetAllReplicatorReply_Replicators_Info{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[26] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *GetHeadLogRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *GetAllReplicatorReply_Replicators_Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} + +func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{16, 0, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { + if x != nil { + return x.Id } return nil } -func (m *PushLogReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { + if x != nil { + return x.Addrs } return nil } -func (m *GetHeadLogReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogReply: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +type GetAllP2PCollectionsReply_Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetAllP2PCollectionsReply_Collection) Reset() { + *x = GetAllP2PCollectionsReply_Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAllP2PCollectionsReply_Collection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} + +func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. +func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{22, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllP2PCollectionsReply_Collection) GetId() string { + if x != nil { + return x.Id } - return nil + return "" } -func skipNet(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNet - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNet - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNet - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNet - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNet - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNet - } - if depth == 0 { - return iNdEx, nil - } + +func (x *GetAllP2PCollectionsReply_Collection) GetName() string { + if x != nil { + return x.Name } - return 0, io.ErrUnexpectedEOF + return "" +} + +var File_net_proto protoreflect.FileDescriptor + +var file_net_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x6e, 0x65, 0x74, + 0x2e, 
0x70, 0x62, 0x22, 0x53, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x64, 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x1a, 0x1b, 0x0a, 0x03, 0x4c, + 0x6f, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, + 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, + 0x0a, 0x10, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x50, 0x75, 0x73, + 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x0f, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x0d, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0xd2, + 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, + 0x64, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x03, 0x63, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, + 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, + 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x03, 0x6c, + 0x6f, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x03, + 0x6c, 0x6f, 0x67, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, + 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x4c, 0x0a, 0x14, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x65, 
0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x53, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2f, 0x0a, 0x15, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, + 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, + 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, + 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, + 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, + 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, + 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 
0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, + 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, + 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xd1, 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, + 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, + 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6e, 0x65, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x44, + 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x12, 0x36, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x6e, 0x65, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x07, 0x50, 0x75, 0x73, + 0x68, 0x4c, 0x6f, 0x67, 0x12, 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x6e, + 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, + 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x17, 0x2e, + 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x32, 0xa3, 0x04, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, + 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, + 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, + 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, + 0x5a, 
0x08, 0x2f, 0x3b, 0x6e, 0x65, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( - ErrInvalidLengthNet = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNet = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNet = fmt.Errorf("proto: unexpected end of group") + file_net_proto_rawDescOnce sync.Once + file_net_proto_rawDescData = file_net_proto_rawDesc ) + +func file_net_proto_rawDescGZIP() []byte { + file_net_proto_rawDescOnce.Do(func() { + file_net_proto_rawDescData = protoimpl.X.CompressGZIP(file_net_proto_rawDescData) + }) + return file_net_proto_rawDescData +} + +var file_net_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_net_proto_goTypes = []interface{}{ + (*Document)(nil), // 0: net.pb.Document + (*GetDocGraphRequest)(nil), // 1: net.pb.GetDocGraphRequest + (*GetDocGraphReply)(nil), // 2: net.pb.GetDocGraphReply + (*PushDocGraphRequest)(nil), // 3: net.pb.PushDocGraphRequest + (*PushDocGraphReply)(nil), // 4: net.pb.PushDocGraphReply + (*GetLogRequest)(nil), // 5: net.pb.GetLogRequest + (*GetLogReply)(nil), // 6: net.pb.GetLogReply + (*PushLogRequest)(nil), // 7: net.pb.PushLogRequest + (*GetHeadLogRequest)(nil), // 8: net.pb.GetHeadLogRequest + (*PushLogReply)(nil), // 9: net.pb.PushLogReply + (*GetHeadLogReply)(nil), // 10: net.pb.GetHeadLogReply + (*SetReplicatorRequest)(nil), // 11: net.pb.SetReplicatorRequest + (*SetReplicatorReply)(nil), // 12: net.pb.SetReplicatorReply + (*DeleteReplicatorRequest)(nil), // 13: net.pb.DeleteReplicatorRequest + (*DeleteReplicatorReply)(nil), // 14: net.pb.DeleteReplicatorReply + (*GetAllReplicatorRequest)(nil), // 15: net.pb.GetAllReplicatorRequest + (*GetAllReplicatorReply)(nil), // 16: net.pb.GetAllReplicatorReply + (*AddP2PCollectionsRequest)(nil), // 17: net.pb.AddP2PCollectionsRequest + (*AddP2PCollectionsReply)(nil), // 18: net.pb.AddP2PCollectionsReply + (*RemoveP2PCollectionsRequest)(nil), // 19: net.pb.RemoveP2PCollectionsRequest + (*RemoveP2PCollectionsReply)(nil), // 20: net.pb.RemoveP2PCollectionsReply + (*GetAllP2PCollectionsRequest)(nil), // 21: net.pb.GetAllP2PCollectionsRequest + (*GetAllP2PCollectionsReply)(nil), // 22: net.pb.GetAllP2PCollectionsReply + (*Document_Log)(nil), // 23: net.pb.Document.Log + (*PushLogRequest_Body)(nil), // 24: net.pb.PushLogRequest.Body + (*GetAllReplicatorReply_Replicators)(nil), // 25: net.pb.GetAllReplicatorReply.Replicators + (*GetAllReplicatorReply_Replicators_Info)(nil), // 26: net.pb.GetAllReplicatorReply.Replicators.Info + (*GetAllP2PCollectionsReply_Collection)(nil), // 27: net.pb.GetAllP2PCollectionsReply.Collection +} +var file_net_proto_depIdxs = []int32{ + 24, // 0: net.pb.PushLogRequest.body:type_name -> net.pb.PushLogRequest.Body + 25, // 1: net.pb.GetAllReplicatorReply.replicators:type_name -> net.pb.GetAllReplicatorReply.Replicators + 27, // 2: net.pb.GetAllP2PCollectionsReply.collections:type_name -> net.pb.GetAllP2PCollectionsReply.Collection + 23, // 3: net.pb.PushLogRequest.Body.log:type_name -> net.pb.Document.Log + 26, // 4: net.pb.GetAllReplicatorReply.Replicators.info:type_name -> net.pb.GetAllReplicatorReply.Replicators.Info + 1, // 5: net.pb.Service.GetDocGraph:input_type -> net.pb.GetDocGraphRequest + 3, // 6: net.pb.Service.PushDocGraph:input_type -> net.pb.PushDocGraphRequest + 5, // 7: net.pb.Service.GetLog:input_type -> net.pb.GetLogRequest + 7, // 8: net.pb.Service.PushLog:input_type -> net.pb.PushLogRequest + 8, // 9: net.pb.Service.GetHeadLog:input_type -> 
net.pb.GetHeadLogRequest + 11, // 10: net.pb.Collection.SetReplicator:input_type -> net.pb.SetReplicatorRequest + 13, // 11: net.pb.Collection.DeleteReplicator:input_type -> net.pb.DeleteReplicatorRequest + 15, // 12: net.pb.Collection.GetAllReplicators:input_type -> net.pb.GetAllReplicatorRequest + 17, // 13: net.pb.Collection.AddP2PCollections:input_type -> net.pb.AddP2PCollectionsRequest + 19, // 14: net.pb.Collection.RemoveP2PCollections:input_type -> net.pb.RemoveP2PCollectionsRequest + 21, // 15: net.pb.Collection.GetAllP2PCollections:input_type -> net.pb.GetAllP2PCollectionsRequest + 2, // 16: net.pb.Service.GetDocGraph:output_type -> net.pb.GetDocGraphReply + 4, // 17: net.pb.Service.PushDocGraph:output_type -> net.pb.PushDocGraphReply + 6, // 18: net.pb.Service.GetLog:output_type -> net.pb.GetLogReply + 9, // 19: net.pb.Service.PushLog:output_type -> net.pb.PushLogReply + 10, // 20: net.pb.Service.GetHeadLog:output_type -> net.pb.GetHeadLogReply + 12, // 21: net.pb.Collection.SetReplicator:output_type -> net.pb.SetReplicatorReply + 14, // 22: net.pb.Collection.DeleteReplicator:output_type -> net.pb.DeleteReplicatorReply + 16, // 23: net.pb.Collection.GetAllReplicators:output_type -> net.pb.GetAllReplicatorReply + 18, // 24: net.pb.Collection.AddP2PCollections:output_type -> net.pb.AddP2PCollectionsReply + 20, // 25: net.pb.Collection.RemoveP2PCollections:output_type -> net.pb.RemoveP2PCollectionsReply + 22, // 26: net.pb.Collection.GetAllP2PCollections:output_type -> net.pb.GetAllP2PCollectionsReply + 16, // [16:27] is the sub-list for method output_type + 5, // [5:16] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_net_proto_init() } +func file_net_proto_init() { + if File_net_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_net_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDocGraphRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDocGraphReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushDocGraphRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushDocGraphReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetLogReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHeadLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushLogReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHeadLogReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*RemoveP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document_Log); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushLogRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply_Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_net_proto_rawDesc, + NumEnums: 0, + NumMessages: 28, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_net_proto_goTypes, + DependencyIndexes: file_net_proto_depIdxs, + MessageInfos: file_net_proto_msgTypes, + }.Build() + File_net_proto = out.File + file_net_proto_rawDesc = nil + file_net_proto_goTypes = nil + file_net_proto_depIdxs = nil +} diff --git a/net/pb/net.proto b/net/pb/net.proto index ae69b2c47d..a4799a1d89 100644 --- a/net/pb/net.proto +++ b/net/pb/net.proto @@ -1,14 +1,14 @@ syntax = "proto3"; package net.pb; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option go_package = "/;net_pb"; // Log represents a thread log. message Document { // ID of the document. - bytes docKey = 1 [(gogoproto.customtype) = "ProtoDocKey"]; + bytes docKey = 1; // head of the log. - bytes head = 4 [(gogoproto.customtype) = "ProtoCid"]; + bytes head = 4; // Record is a thread record containing link data. message Log { @@ -34,15 +34,15 @@ message PushLogRequest { message Body { // docKey is the DocKey of the document that is affected by the log. - bytes docKey = 1 [(gogoproto.customtype) = "ProtoDocKey"]; + bytes docKey = 1; // cid is the CID of the composite of the document. - bytes cid = 2 [(gogoproto.customtype) = "ProtoCid"]; + bytes cid = 2; // schemaID is the SchemaID of the collection that the document resides in. 
bytes schemaID = 3; // creator is the PeerID of the peer that created the log. string creator = 4; // log hold the block that represent version of the document. - Document.Log log = 5; + Document.Log log = 6; } } @@ -64,4 +64,83 @@ service Service { rpc PushLog(PushLogRequest) returns (PushLogReply) {} // GetHeadLog from this peer rpc GetHeadLog(GetHeadLogRequest) returns (GetHeadLogReply) {} +} + +message SetReplicatorRequest { + repeated string collections = 1; + bytes addr = 2; +} + +message SetReplicatorReply { + bytes peerID = 1; +} + +message DeleteReplicatorRequest { + bytes peerID = 1; + repeated string collections = 2; +} + +message DeleteReplicatorReply { + bytes peerID = 1; +} + +message GetAllReplicatorRequest {} + +message GetAllReplicatorReply { + message Replicators { + message Info { + bytes id = 1; + bytes addrs = 2; + } + Info info = 1; + repeated string schemas = 2; + } + + repeated Replicators replicators = 1; + +} + +message AddP2PCollectionsRequest { + repeated string collections = 1; +} + +message AddP2PCollectionsReply { + string err = 1; +} + +message RemoveP2PCollectionsRequest { + repeated string collections = 1; +} + +message RemoveP2PCollectionsReply { + string err = 1; +} + +message GetAllP2PCollectionsRequest {} + +message GetAllP2PCollectionsReply { + message Collection { + string id = 1; + string name = 2; + } + repeated Collection collections = 1; +} + + +// Collection is the peer-to-peer network API for document sync by replication and subscription to collections +service Collection { + // SetReplicator for this peer + rpc SetReplicator(SetReplicatorRequest) returns (SetReplicatorReply) {} + + // DeleteReplicator for this peer + rpc DeleteReplicator(DeleteReplicatorRequest) returns (DeleteReplicatorReply) {} + + // DeleteReplicator for this peer + rpc GetAllReplicators(GetAllReplicatorRequest) returns (GetAllReplicatorReply) {} + + rpc AddP2PCollections(AddP2PCollectionsRequest) returns (AddP2PCollectionsReply) {} + + rpc RemoveP2PCollections(RemoveP2PCollectionsRequest) returns (RemoveP2PCollectionsReply) {} + + rpc GetAllP2PCollections(GetAllP2PCollectionsRequest) returns (GetAllP2PCollectionsReply) {} } \ No newline at end of file diff --git a/net/pb/net_grpc.pb.go b/net/pb/net_grpc.pb.go new file mode 100644 index 0000000000..e50cbec859 --- /dev/null +++ b/net/pb/net_grpc.pb.go @@ -0,0 +1,548 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.9 +// source: net.proto + +package net_pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Service_GetDocGraph_FullMethodName = "/net.pb.Service/GetDocGraph" + Service_PushDocGraph_FullMethodName = "/net.pb.Service/PushDocGraph" + Service_GetLog_FullMethodName = "/net.pb.Service/GetLog" + Service_PushLog_FullMethodName = "/net.pb.Service/PushLog" + Service_GetHeadLog_FullMethodName = "/net.pb.Service/GetHeadLog" +) + +// ServiceClient is the client API for Service service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServiceClient interface { + // GetDocGraph from this peer. 
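+	// A minimal client-side sketch (assuming an already-dialed *grpc.ClientConn
+	// named conn, which is not part of this generated file):
+	//
+	//	c := NewServiceClient(conn)
+	//	reply, err := c.GetDocGraph(ctx, &GetDocGraphRequest{})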
+ GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) + // PushDocGraph to this peer. + PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) + // GetLog from this peer. + GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) + // PushLog to this peer. + PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) + // GetHeadLog from this peer + GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) +} + +type serviceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient { + return &serviceClient{cc} +} + +func (c *serviceClient) GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) { + out := new(GetDocGraphReply) + err := c.cc.Invoke(ctx, Service_GetDocGraph_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) { + out := new(PushDocGraphReply) + err := c.cc.Invoke(ctx, Service_PushDocGraph_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) { + out := new(GetLogReply) + err := c.cc.Invoke(ctx, Service_GetLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) { + out := new(PushLogReply) + err := c.cc.Invoke(ctx, Service_PushLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) { + out := new(GetHeadLogReply) + err := c.cc.Invoke(ctx, Service_GetHeadLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceServer is the server API for Service service. +// All implementations must embed UnimplementedServiceServer +// for forward compatibility +type ServiceServer interface { + // GetDocGraph from this peer. + GetDocGraph(context.Context, *GetDocGraphRequest) (*GetDocGraphReply, error) + // PushDocGraph to this peer. + PushDocGraph(context.Context, *PushDocGraphRequest) (*PushDocGraphReply, error) + // GetLog from this peer. + GetLog(context.Context, *GetLogRequest) (*GetLogReply, error) + // PushLog to this peer. + PushLog(context.Context, *PushLogRequest) (*PushLogReply, error) + // GetHeadLog from this peer + GetHeadLog(context.Context, *GetHeadLogRequest) (*GetHeadLogReply, error) + mustEmbedUnimplementedServiceServer() +} + +// UnimplementedServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedServiceServer struct { +} + +func (UnimplementedServiceServer) GetDocGraph(context.Context, *GetDocGraphRequest) (*GetDocGraphReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDocGraph not implemented") +} +func (UnimplementedServiceServer) PushDocGraph(context.Context, *PushDocGraphRequest) (*PushDocGraphReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method PushDocGraph not implemented") +} +func (UnimplementedServiceServer) GetLog(context.Context, *GetLogRequest) (*GetLogReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLog not implemented") +} +func (UnimplementedServiceServer) PushLog(context.Context, *PushLogRequest) (*PushLogReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method PushLog not implemented") +} +func (UnimplementedServiceServer) GetHeadLog(context.Context, *GetHeadLogRequest) (*GetHeadLogReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHeadLog not implemented") +} +func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {} + +// UnsafeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServiceServer will +// result in compilation errors. +type UnsafeServiceServer interface { + mustEmbedUnimplementedServiceServer() +} + +func RegisterServiceServer(s grpc.ServiceRegistrar, srv ServiceServer) { + s.RegisterService(&Service_ServiceDesc, srv) +} + +func _Service_GetDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDocGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetDocGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetDocGraph_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetDocGraph(ctx, req.(*GetDocGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_PushDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PushDocGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).PushDocGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_PushDocGraph_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).PushDocGraph(ctx, req.(*PushDocGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetLog(ctx, req.(*GetLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_PushLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PushLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).PushLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_PushLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).PushLog(ctx, req.(*PushLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetHeadLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHeadLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetHeadLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetHeadLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetHeadLog(ctx, req.(*GetHeadLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Service_ServiceDesc is the grpc.ServiceDesc for Service service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Service_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "net.pb.Service", + HandlerType: (*ServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetDocGraph", + Handler: _Service_GetDocGraph_Handler, + }, + { + MethodName: "PushDocGraph", + Handler: _Service_PushDocGraph_Handler, + }, + { + MethodName: "GetLog", + Handler: _Service_GetLog_Handler, + }, + { + MethodName: "PushLog", + Handler: _Service_PushLog_Handler, + }, + { + MethodName: "GetHeadLog", + Handler: _Service_GetHeadLog_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "net.proto", +} + +const ( + Collection_SetReplicator_FullMethodName = "/net.pb.Collection/SetReplicator" + Collection_DeleteReplicator_FullMethodName = "/net.pb.Collection/DeleteReplicator" + Collection_GetAllReplicators_FullMethodName = "/net.pb.Collection/GetAllReplicators" + Collection_AddP2PCollections_FullMethodName = "/net.pb.Collection/AddP2PCollections" + Collection_RemoveP2PCollections_FullMethodName = "/net.pb.Collection/RemoveP2PCollections" + Collection_GetAllP2PCollections_FullMethodName = "/net.pb.Collection/GetAllP2PCollections" +) + +// CollectionClient is the client API for Collection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type CollectionClient interface {
+	// SetReplicator for this peer
+	SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error)
+	// DeleteReplicator for this peer
+	DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error)
+	// GetAllReplicators for this peer
+	GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error)
+	AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error)
+	RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error)
+	GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error)
+}
+
+type collectionClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewCollectionClient(cc grpc.ClientConnInterface) CollectionClient {
+	return &collectionClient{cc}
+}
+
+func (c *collectionClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) {
+	out := new(SetReplicatorReply)
+	err := c.cc.Invoke(ctx, Collection_SetReplicator_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *collectionClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) {
+	out := new(DeleteReplicatorReply)
+	err := c.cc.Invoke(ctx, Collection_DeleteReplicator_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *collectionClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) {
+	out := new(GetAllReplicatorReply)
+	err := c.cc.Invoke(ctx, Collection_GetAllReplicators_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *collectionClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) {
+	out := new(AddP2PCollectionsReply)
+	err := c.cc.Invoke(ctx, Collection_AddP2PCollections_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *collectionClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) {
+	out := new(RemoveP2PCollectionsReply)
+	err := c.cc.Invoke(ctx, Collection_RemoveP2PCollections_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *collectionClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) {
+	out := new(GetAllP2PCollectionsReply)
+	err := c.cc.Invoke(ctx, Collection_GetAllP2PCollections_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// CollectionServer is the server API for Collection service.
+// All implementations must embed UnimplementedCollectionServer
+// for forward compatibility
+type CollectionServer interface {
+	// SetReplicator for this peer
+	SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error)
+	// DeleteReplicator for this peer
+	DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error)
+	// GetAllReplicators for this peer
+	GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error)
+	AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error)
+	RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error)
+	GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error)
+	mustEmbedUnimplementedCollectionServer()
+}
+
+// UnimplementedCollectionServer must be embedded to have forward compatible implementations.
+type UnimplementedCollectionServer struct {
+}
+
+func (UnimplementedCollectionServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented")
+}
+func (UnimplementedCollectionServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented")
+}
+func (UnimplementedCollectionServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented")
+}
+func (UnimplementedCollectionServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented")
+}
+func (UnimplementedCollectionServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented")
+}
+func (UnimplementedCollectionServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented")
+}
+func (UnimplementedCollectionServer) mustEmbedUnimplementedCollectionServer() {}
+
+// UnsafeCollectionServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CollectionServer will
+// result in compilation errors.
+type UnsafeCollectionServer interface { + mustEmbedUnimplementedCollectionServer() +} + +func RegisterCollectionServer(s grpc.ServiceRegistrar, srv CollectionServer) { + s.RegisterService(&Collection_ServiceDesc, srv) +} + +func _Collection_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).SetReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_SetReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).DeleteReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_DeleteReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).GetAllReplicators(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_GetAllReplicators_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).AddP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_AddP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).RemoveP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_RemoveP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(CollectionServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).GetAllP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_GetAllP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Collection_ServiceDesc is the grpc.ServiceDesc for Collection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Collection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "net.pb.Collection", + HandlerType: (*CollectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetReplicator", + Handler: _Collection_SetReplicator_Handler, + }, + { + MethodName: "DeleteReplicator", + Handler: _Collection_DeleteReplicator_Handler, + }, + { + MethodName: "GetAllReplicators", + Handler: _Collection_GetAllReplicators_Handler, + }, + { + MethodName: "AddP2PCollections", + Handler: _Collection_AddP2PCollections_Handler, + }, + { + MethodName: "RemoveP2PCollections", + Handler: _Collection_RemoveP2PCollections_Handler, + }, + { + MethodName: "GetAllP2PCollections", + Handler: _Collection_GetAllP2PCollections_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "net.proto", +} diff --git a/net/pb/net_vtproto.pb.go b/net/pb/net_vtproto.pb.go new file mode 100644 index 0000000000..9ac8b5c379 --- /dev/null +++ b/net/pb/net_vtproto.pb.go @@ -0,0 +1,3998 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: net.proto + +package net_pb + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Document_Log) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Document_Log) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Document_Log) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Block) > 0 { + i -= len(m.Block) + copy(dAtA[i:], m.Block) + i = encodeVarint(dAtA, i, uint64(len(m.Block))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Document) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Document) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Document) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Head) > 0 { + i -= len(m.Head) + copy(dAtA[i:], m.Head) + i = encodeVarint(dAtA, i, uint64(len(m.Head))) + i-- + dAtA[i] = 0x22 + } + if len(m.DocKey) > 0 { + i -= len(m.DocKey) + copy(dAtA[i:], m.DocKey) + i = encodeVarint(dAtA, i, uint64(len(m.DocKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetDocGraphRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetDocGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetDocGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetDocGraphReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetDocGraphReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetDocGraphReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushDocGraphRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
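The tag bytes hard-coded in these marshalers (0xa, 0x12, 0x1a, 0x22, 0x32) are the protobuf field tags written out by hand: each one is (field_number << 3) | wire_type, with wire type 2 for length-delimited fields such as bytes, string, and embedded messages. A minimal standalone sketch of that arithmetic, for orientation only (the fieldTag helper below is illustrative and not part of the generated file):

package main

import "fmt"

// wireBytes is the length-delimited wire type used for bytes, string,
// and embedded-message fields.
const wireBytes = 2

// fieldTag computes the single-byte protobuf tag for small field numbers:
// (field_number << 3) | wire_type.
func fieldTag(fieldNum, wireType uint64) uint64 {
	return fieldNum<<3 | wireType
}

func main() {
	fmt.Printf("docKey   (field 1): %#x\n", fieldTag(1, wireBytes)) // 0xa
	fmt.Printf("cid      (field 2): %#x\n", fieldTag(2, wireBytes)) // 0x12
	fmt.Printf("schemaID (field 3): %#x\n", fieldTag(3, wireBytes)) // 0x1a
	fmt.Printf("head     (field 4): %#x\n", fieldTag(4, wireBytes)) // 0x22
	fmt.Printf("log      (field 6): %#x\n", fieldTag(6, wireBytes)) // 0x32
}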
+func (m *PushDocGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushDocGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushDocGraphReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushDocGraphReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushDocGraphReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetLogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetLogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetLogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetLogReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetLogReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushLogRequest_Body) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushLogRequest_Body) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushLogRequest_Body) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Log != nil { + size, err := m.Log.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarint(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0x22 + } + if 
len(m.SchemaID) > 0 { + i -= len(m.SchemaID) + copy(dAtA[i:], m.SchemaID) + i = encodeVarint(dAtA, i, uint64(len(m.SchemaID))) + i-- + dAtA[i] = 0x1a + } + if len(m.Cid) > 0 { + i -= len(m.Cid) + copy(dAtA[i:], m.Cid) + i = encodeVarint(dAtA, i, uint64(len(m.Cid))) + i-- + dAtA[i] = 0x12 + } + if len(m.DocKey) > 0 { + i -= len(m.DocKey) + copy(dAtA[i:], m.DocKey) + i = encodeVarint(dAtA, i, uint64(len(m.DocKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PushLogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushLogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushLogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Body != nil { + size, err := m.Body.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetHeadLogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetHeadLogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetHeadLogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushLogReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushLogReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetHeadLogReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetHeadLogReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetHeadLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + 
return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addr) > 0 { + i -= len(m.Addr) + copy(dAtA[i:], m.Addr) + i = encodeVarint(dAtA, i, uint64(len(m.Addr))) + i-- + dAtA[i] = 0x12 + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } 
+ i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addrs) > 0 { + i -= len(m.Addrs) + copy(dAtA[i:], m.Addrs) + i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Schemas) > 0 { + for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Schemas[iNdEx]) + copy(dAtA[i:], m.Schemas[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Info != nil { + size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Replicators) > 0 { + for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + 
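Taken together, SizeVT computes the exact encoded length and MarshalToSizedBufferVT fills the buffer back-to-front, so MarshalVT allocates exactly once and never falls back to reflection. A minimal round-trip sketch of the generated API, assuming the net_pb package is importable (the import path and address below are placeholders for illustration):

package main

import (
	"fmt"

	// Placeholder import path for the generated net_pb package; adjust to the real module path.
	pb "github.com/sourcenetwork/defradb/net/pb"
)

func main() {
	req := &pb.SetReplicatorRequest{
		Collections: []string{"Users"},                 // collection names to replicate
		Addr:        []byte("/ip4/127.0.0.1/tcp/9171"), // placeholder peer multiaddr
	}

	// MarshalVT uses the hand-rolled fast path above instead of reflection.
	buf, err := req.MarshalVT()
	if err != nil {
		panic(err)
	}

	// UnmarshalVT decodes the same wire format back into a struct.
	var got pb.SetReplicatorRequest
	if err := got.UnmarshalVT(buf); err != nil {
		panic(err)
	}
	fmt.Println(got.Collections, string(got.Addr))
}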
+func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Document_Log) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Block) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Document) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Head) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetDocGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetDocGraphReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushDocGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushDocGraphReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushLogRequest_Body) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SchemaID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Creator) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Log != nil { + l = m.Log.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PushLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Body != nil { + l = m.Body.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetHeadLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetHeadLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = 
len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Addrs) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Schemas) > 0 { + for _, s := range m.Schemas { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Replicators) > 0 { + for _, e := range m.Replicators { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, e := range m.Collections { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Document_Log) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Document_Log: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Document_Log: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) + if m.Block == nil { + m.Block = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Document) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Document: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Head = append(m.Head[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Head == nil { + m.Head = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetDocGraphRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetDocGraphReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushDocGraphRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushDocGraphReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetLogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetLogReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushLogRequest_Body) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushLogRequest_Body: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushLogRequest_Body: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) + if m.SchemaID == nil { + m.SchemaID = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Log == nil { + m.Log = &Document_Log{} + } + if err := m.Log.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushLogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Body == nil { + m.Body = &PushLogRequest_Body{} + } + if err := m.Body.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetHeadLogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetHeadLogReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) + if m.Addr == nil { + m.Addr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) + if m.Id == nil { + m.Id = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Addrs == nil { + m.Addrs = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &GetAllReplicatorReply_Replicators_Info{} + } + if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) + if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) + if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/net/peer.go b/net/peer.go index bb3e11c420..26a24a38ae 100644 --- a/net/peer.go +++ b/net/peer.go @@ -34,6 +34,8 @@ import ( "github.com/libp2p/go-libp2p/core/routing" ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -63,7 +65,7 @@ type Peer struct { ps *pubsub.PubSub server *server - p2pRPC *grpc.Server // rpc server over the p2p network + p2pRPC *grpc.Server // rpc server over the P2P network // Used to close the dagWorker pool for a given document. // The string represents a dockey. @@ -84,6 +86,8 @@ type Peer struct { ctx context.Context cancel context.CancelFunc + + pb.UnimplementedCollectionServer } // NewPeer creates a new instance of the DefraDB server as a peer-to-peer node. 
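Every `UnmarshalVT` method in the generated code above decodes fields with the same hand-rolled loop: read a varint tag, split it into a field number and a wire type, and for length-delimited fields (wire type 2) read a varint length and bounds-check it before slicing the payload. Unknown fields are not dropped; they are skipped and appended verbatim to `unknownFields` so messages round-trip losslessly. A minimal, standalone sketch of that varint and length-delimited decoding (the input bytes here are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift/accumulate loop in the generated code:
// seven payload bits per byte, with the high bit set on every byte
// except the last one.
func decodeVarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return value, index, nil
}

func main() {
	// Field 1, wire type 2 (length-delimited): tag = 1<<3 | 2 = 0x0A,
	// followed by a varint length and that many payload bytes.
	data := []byte{0x0A, 0x05, 'h', 'e', 'l', 'l', 'o'}

	wire, i, _ := decodeVarint(data, 0)
	fieldNum, wireType := int32(wire>>3), int(wire&0x7)

	length, i, _ := decodeVarint(data, i)
	payload := data[i : i+int(length)]

	fmt.Println(fieldNum, wireType, string(payload)) // 1 2 hello
}
```

The `skip` helper at the end of the generated file applies the same idea to the remaining wire types (varint, fixed64, fixed32, and the deprecated group markers), which is how the `default` branches compute `skippy` for fields they do not recognize.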
@@ -98,7 +102,7 @@ func NewPeer( dialOptions []grpc.DialOption, ) (*Peer, error) { if db == nil { - return nil, errors.New("database object can't be empty") + return nil, ErrNilDB } ctx, cancel := context.WithCancel(ctx) @@ -167,7 +171,7 @@ func (p *Peer) Start() error { if p.ps != nil { if !p.db.Events().Updates.HasValue() { - return errors.New("tried to subscribe to update channel, but update channel is nil") + return ErrNilUpdateChannel } updateChannel, err := p.db.Events().Updates.Value().Subscribe() @@ -180,7 +184,7 @@ func (p *Peer) Start() error { go p.handleBroadcastLoop() } - // register the p2p gRPC server + // register the P2P gRPC server go func() { pb.RegisterServiceServer(p.p2pRPC, p.server) if err := p.p2pRPC.Serve(p2plistener); err != nil && @@ -294,8 +298,8 @@ func (p *Peer) RegisterNewDocument( // publish log body := &pb.PushLogRequest_Body{ - DocKey: &pb.ProtoDocKey{DocKey: dockey}, - Cid: &pb.ProtoCid{Cid: c}, + DocKey: []byte(dockey.String()), + Cid: c.Bytes(), SchemaID: []byte(schemaID), Creator: p.host.ID().String(), Log: &pb.Document_Log{ @@ -309,25 +313,36 @@ func (p *Peer) RegisterNewDocument( return p.server.publishLog(p.ctx, schemaID, req) } +func marshalPeerID(id peer.ID) []byte { + b, _ := id.Marshal() // This will never return an error + return b +} + // SetReplicator adds a target peer node as a replication destination for documents in our DB. func (p *Peer) SetReplicator( ctx context.Context, - paddr ma.Multiaddr, - collectionNames ...string, -) (peer.ID, error) { + req *pb.SetReplicatorRequest, +) (*pb.SetReplicatorReply, error) { + addr, err := ma.NewMultiaddrBytes(req.Addr) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + txn, err := p.db.NewTxn(ctx, true) if err != nil { - return "", err + return nil, err } store := p.db.WithTxn(txn) - pid, err := p.setReplicator(ctx, store, paddr, collectionNames...) + pid, err := p.setReplicator(ctx, store, addr, req.Collections...) if err != nil { txn.Discard(ctx) - return "", err + return nil, err } - return pid, txn.Commit(ctx) + return &pb.SetReplicatorReply{ + PeerID: marshalPeerID(pid), + }, txn.Commit(ctx) } // setReplicator adds a target peer node as a replication destination for documents in our DB. @@ -511,28 +526,30 @@ func (p *Peer) pushToReplicator( } } -// DeleteReplicator adds a target peer node as a replication destination for documents in our DB. +// DeleteReplicator removes a peer node from the replicators. func (p *Peer) DeleteReplicator( ctx context.Context, - pid peer.ID, - collectionNames ...string, -) error { + req *pb.DeleteReplicatorRequest, +) (*pb.DeleteReplicatorReply, error) { + log.Debug(ctx, "Received DeleteReplicator request") + txn, err := p.db.NewTxn(ctx, true) if err != nil { - return err + return nil, err } store := p.db.WithTxn(txn) - err = p.deleteReplicator(ctx, store, pid, collectionNames...) + err = p.deleteReplicator(ctx, store, peer.ID(req.PeerID), req.Collections...) if err != nil { txn.Discard(ctx) - return err + return nil, err } - return txn.Commit(ctx) + return &pb.DeleteReplicatorReply{ + PeerID: req.PeerID, + }, txn.Commit(ctx) } -// DeleteReplicator adds a target peer node as a replication destination for documents in our DB. 
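Note what the wire-format changes in this hunk imply: the old `ProtoDocKey`/`ProtoCid` wrapper messages are gone, and dockeys, CIDs, peer IDs, and multiaddrs now cross the network as plain byte fields that each side re-parses at the boundary (`ma.NewMultiaddrBytes` for `req.Addr`, `marshalPeerID` for the reply's `PeerID`). A standalone sketch of those round trips, using only the libp2p and multiaddr packages (the address and key are illustrative):

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Multiaddrs travel as their binary form: addr.Bytes() on the way
	// out, ma.NewMultiaddrBytes on the way in (as SetReplicator does).
	addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/9171")
	back, err := ma.NewMultiaddrBytes(addr.Bytes())
	fmt.Println(back, err) // /ip4/127.0.0.1/tcp/9171 <nil>

	// Peer IDs travel as the bytes returned by id.Marshal(), which is
	// what marshalPeerID wraps; peer.IDFromBytes restores them.
	priv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
	id, _ := peer.IDFromPrivateKey(priv)
	raw, _ := id.Marshal()
	id2, _ := peer.IDFromBytes(raw)
	fmt.Println(id == id2) // true
}
```

Since `id.Marshal()` simply returns the ID's raw multihash bytes, the comment on `marshalPeerID` holds: that error path is unreachable, so discarding it is safe.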
func (p *Peer) deleteReplicator( ctx context.Context, store client.Store, @@ -541,7 +558,7 @@ func (p *Peer) deleteReplicator( ) error { // make sure it's not ourselves if pid == p.host.ID() { - return errors.New("can't target ourselves as a replicator") + return ErrSelfTargetForReplicator } // verify collections @@ -595,9 +612,32 @@ func (p *Peer) deleteReplicator( }) } -// GetAllReplicators adds a target peer node as a replication destination for documents in our DB. -func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - return p.db.GetAllReplicators(ctx) +// GetAllReplicators returns all replicators and the schemas that are replicated to them. +func (p *Peer) GetAllReplicators( + ctx context.Context, + req *pb.GetAllReplicatorRequest, +) (*pb.GetAllReplicatorReply, error) { + log.Debug(ctx, "Received GetAllReplicators request") + + reps, err := p.db.GetAllReplicators(ctx) + if err != nil { + return nil, err + } + + pbReps := []*pb.GetAllReplicatorReply_Replicators{} + for _, rep := range reps { + pbReps = append(pbReps, &pb.GetAllReplicatorReply_Replicators{ + Info: &pb.GetAllReplicatorReply_Replicators_Info{ + Id: []byte(rep.Info.ID), + Addrs: rep.Info.Addrs[0].Bytes(), + }, + Schemas: rep.Schemas, + }) + } + + return &pb.GetAllReplicatorReply{ + Replicators: pbReps, + }, nil } func (p *Peer) loadReplicators(ctx context.Context) error { @@ -651,7 +691,7 @@ func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, err func (p *Peer) handleDocCreateLog(evt events.Update) error { dockey, err := client.NewDocKeyFromString(evt.DocKey) if err != nil { - return errors.Wrap("failed to get DocKey from broadcast message", err) + return NewErrFailedToGetDockey(err) } // We need to register the document before pushing to the replicators if we want to @@ -669,7 +709,7 @@ func (p *Peer) handleDocCreateLog(evt events.Update) error { func (p *Peer) handleDocUpdateLog(evt events.Update) error { dockey, err := client.NewDocKeyFromString(evt.DocKey) if err != nil { - return errors.Wrap("failed to get DocKey from broadcast message", err) + return NewErrFailedToGetDockey(err) } log.Debug( p.ctx, @@ -679,8 +719,8 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { logging.NewKV("SchemaId", evt.SchemaID)) body := &pb.PushLogRequest_Body{ - DocKey: &pb.ProtoDocKey{DocKey: dockey}, - Cid: &pb.ProtoCid{Cid: evt.Cid}, + DocKey: []byte(dockey.String()), + Cid: evt.Cid.Bytes(), SchemaID: []byte(evt.SchemaID), Creator: p.host.ID().String(), Log: &pb.Document_Log{ @@ -695,11 +735,11 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { p.pushLogToReplicators(p.ctx, evt) if err := p.server.publishLog(p.ctx, evt.DocKey, req); err != nil { - return errors.Wrap(fmt.Sprintf("can't publish log %s for dockey %s", evt.Cid, evt.DocKey), err) + return NewErrPublishingToDockeyTopic(err, evt.Cid.String(), evt.DocKey) } if err := p.server.publishLog(p.ctx, evt.SchemaID, req); err != nil { - return errors.Wrap(fmt.Sprintf("can't publish log %s for schemaID %s", evt.Cid, evt.SchemaID), err) + return NewErrPublishingToSchemaTopic(err, evt.Cid.String(), evt.SchemaID) } return nil @@ -816,39 +856,44 @@ func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error { // changes to the server may still be applied. // // WARNING: Calling this on collections with a large number of documents may take a long time to process. 
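The collection endpoints that follow keep the same transactional discipline as the replicator endpoints above: open a transaction, run every mutation against `p.db.WithTxn(txn)`, return `(nil, err)` and discard on any failure, and fold the final `Commit` into the successful return value. Reduced to a skeleton with placeholder types (this compresses several DefraDB interfaces and is not the real API):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// txn is a stand-in for DefraDB's datastore transaction.
type txn struct{}

func (t *txn) Commit(ctx context.Context) error { return nil }
func (t *txn) Discard(ctx context.Context)      {}

type request struct{ Collections []string }
type reply struct{}

// endpoint mirrors the shape shared by SetReplicator, DeleteReplicator,
// and the Add/Remove/GetAll P2P collection methods: all mutations happen
// under one transaction, and the reply is only produced on commit.
func endpoint(ctx context.Context, req *request) (*reply, error) {
	t := &txn{}

	for _, col := range req.Collections {
		if col == "" { // stand-in for a failed store mutation
			t.Discard(ctx)
			return nil, errors.New("store mutation failed")
		}
	}

	return &reply{}, t.Commit(ctx)
}

func main() {
	r, err := endpoint(context.Background(), &request{Collections: []string{"bae-123"}})
	fmt.Println(r, err) // &{} <nil>
}
```

One wrinkle worth noticing in the real code: `AddP2PCollections` and `RemoveP2PCollections` must also undo their pubsub side effects (`rollbackAddPubSubTopics` / `rollbackRemovePubSubTopics`) when the commit fails, because topic subscriptions live outside the transaction.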
-func (p *Peer) AddP2PCollections(collections []string) error { +func (p *Peer) AddP2PCollections( + ctx context.Context, + req *pb.AddP2PCollectionsRequest, +) (*pb.AddP2PCollectionsReply, error) { + log.Debug(ctx, "Received AddP2PCollections request") + txn, err := p.db.NewTxn(p.ctx, false) if err != nil { - return err + return nil, err } defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} - for _, col := range collections { + for _, col := range req.Collections { storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) if err != nil { - return err + return nil, err } storeCollections = append(storeCollections, storeCol) } // Ensure we can add all the collections to the store on the transaction // before adding to topics. - for _, col := range collections { + for _, col := range req.Collections { err := store.AddP2PCollection(p.ctx, col) if err != nil { - return err + return nil, err } } // Add pubsub topics and remove them if we get an error. addedTopics := []string{} - for _, col := range collections { + for _, col := range req.Collections { err = p.server.addPubSubTopic(col, true) if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) + return nil, p.rollbackAddPubSubTopics(addedTopics, err) } addedTopics = append(addedTopics, col) } @@ -859,12 +904,12 @@ func (p *Peer) AddP2PCollections(collections []string) error { for _, col := range storeCollections { keyChan, err := col.GetAllDocKeys(p.ctx) if err != nil { - return err + return nil, err } for key := range keyChan { err := p.server.removePubSubTopic(key.Key.String()) if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) + return nil, p.rollbackRemovePubSubTopics(removedTopics, err) } removedTopics = append(removedTopics, key.Key.String()) } @@ -872,10 +917,10 @@ func (p *Peer) AddP2PCollections(collections []string) error { if err = txn.Commit(p.ctx); err != nil { err = p.rollbackRemovePubSubTopics(removedTopics, err) - return p.rollbackAddPubSubTopics(addedTopics, err) + return nil, p.rollbackAddPubSubTopics(addedTopics, err) } - return nil + return &pb.AddP2PCollectionsReply{}, nil } // RemoveP2PCollections removes the given collectionIDs from the pubsup topics. @@ -884,39 +929,44 @@ func (p *Peer) AddP2PCollections(collections []string) error { // changes to the server may still be applied. // // WARNING: Calling this on collections with a large number of documents may take a long time to process. -func (p *Peer) RemoveP2PCollections(collections []string) error { +func (p *Peer) RemoveP2PCollections( + ctx context.Context, + req *pb.RemoveP2PCollectionsRequest, +) (*pb.RemoveP2PCollectionsReply, error) { + log.Debug(ctx, "Received RemoveP2PCollections request") + txn, err := p.db.NewTxn(p.ctx, false) if err != nil { - return err + return nil, err } defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} - for _, col := range collections { + for _, col := range req.Collections { storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) if err != nil { - return err + return nil, err } storeCollections = append(storeCollections, storeCol) } // Ensure we can remove all the collections to the store on the transaction // before adding to topics. 
-	for _, col := range collections {
+	for _, col := range req.Collections {
 		err := store.RemoveP2PCollection(p.ctx, col)
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 
 	// Remove pubsub topics and add them back if we get an error.
 	removedTopics := []string{}
-	for _, col := range collections {
+	for _, col := range req.Collections {
 		err = p.server.removePubSubTopic(col)
 		if err != nil {
-			return p.rollbackRemovePubSubTopics(removedTopics, err)
+			return nil, p.rollbackRemovePubSubTopics(removedTopics, err)
 		}
 		removedTopics = append(removedTopics, col)
 	}
@@ -927,12 +977,12 @@ func (p *Peer) RemoveP2PCollections(collections []string) error {
 	for _, col := range storeCollections {
 		keyChan, err := col.GetAllDocKeys(p.ctx)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		for key := range keyChan {
 			err := p.server.addPubSubTopic(key.Key.String(), true)
 			if err != nil {
-				return p.rollbackAddPubSubTopics(addedTopics, err)
+				return nil, p.rollbackAddPubSubTopics(addedTopics, err)
 			}
 			addedTopics = append(addedTopics, key.Key.String())
 		}
@@ -940,38 +990,45 @@ func (p *Peer) RemoveP2PCollections(collections []string) error {
 
 	if err = txn.Commit(p.ctx); err != nil {
 		err = p.rollbackAddPubSubTopics(addedTopics, err)
-		return p.rollbackRemovePubSubTopics(removedTopics, err)
+		return nil, p.rollbackRemovePubSubTopics(removedTopics, err)
 	}
 
-	return nil
+	return &pb.RemoveP2PCollectionsReply{}, nil
 }
 
-// GetAllP2PCollections gets all the collectionIDs that have been added to the
-// pubsub topics from the system store.
-func (p *Peer) GetAllP2PCollections() ([]client.P2PCollection, error) {
+// GetAllP2PCollections gets all the collectionIDs from the pubsub topics.
+func (p *Peer) GetAllP2PCollections(
+	ctx context.Context,
+	req *pb.GetAllP2PCollectionsRequest,
+) (*pb.GetAllP2PCollectionsReply, error) {
+	log.Debug(ctx, "Received GetAllP2PCollections request")
+
 	txn, err := p.db.NewTxn(p.ctx, false)
 	if err != nil {
 		return nil, err
 	}
-	defer txn.Discard(p.ctx)
 	store := p.db.WithTxn(txn)
 
 	collections, err := p.db.GetAllP2PCollections(p.ctx)
 	if err != nil {
+		txn.Discard(p.ctx)
 		return nil, err
 	}
 
-	p2pCols := []client.P2PCollection{}
+	pbCols := []*pb.GetAllP2PCollectionsReply_Collection{}
 	for _, colID := range collections {
 		col, err := store.GetCollectionBySchemaID(p.ctx, colID)
 		if err != nil {
+			txn.Discard(p.ctx)
 			return nil, err
 		}
-		p2pCols = append(p2pCols, client.P2PCollection{
-			ID:   colID,
+		pbCols = append(pbCols, &pb.GetAllP2PCollectionsReply_Collection{
+			Id:   colID,
 			Name: col.Name(),
 		})
 	}
-	return p2pCols, txn.Commit(p.ctx)
+	return &pb.GetAllP2PCollectionsReply{
+		Collections: pbCols,
+	}, txn.Commit(p.ctx)
 }
diff --git a/net/peer_test.go b/net/peer_test.go
new file mode 100644
index 0000000000..092e908cd2
--- /dev/null
+++ b/net/peer_test.go
@@ -0,0 +1,1182 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
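The new `net/peer_test.go` below builds its fixtures by hand: an `EmptyNode` stub that satisfies `ipld.Node`, and a `createCID` helper that derives a CIDv1 from a document's serialized bytes. That derivation needs only the go-cid and go-multihash packages; a minimal standalone version (the document JSON is just sample data):

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// The same prefix the test helper uses: CIDv1, dag-protobuf codec,
	// SHA2-256 multihash at its default length.
	pref := cid.V1Builder{
		Codec:    cid.DagProtobuf,
		MhType:   mh.SHA2_256,
		MhLength: 0, // default length
	}

	c, err := pref.Sum([]byte(`{"name": "John", "age": 30}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(c) // a "bafybei..." CID derived from the input bytes
}
```

`EmptyNode.Cid()` runs the same builder over `nil` bytes, so every stub node reports the CID of the empty input, which is all the pubsub plumbing in these tests needs.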
+ +package net + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ipld "github.com/ipfs/go-ipld-format" + libp2p "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + rpc "github.com/textileio/go-libp2p-pubsub-rpc" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/core/crdt" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/logging" + pb "github.com/sourcenetwork/defradb/net/pb" + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +type EmptyNode struct{} + +var ErrEmptyNode error = errors.New("dummy node") + +func (n *EmptyNode) Resolve([]string) (any, []string, error) { + return nil, nil, ErrEmptyNode +} + +func (n *EmptyNode) Tree(string, int) []string { + return nil +} + +func (n *EmptyNode) ResolveLink([]string) (*ipld.Link, []string, error) { + return nil, nil, ErrEmptyNode +} + +func (n *EmptyNode) Copy() ipld.Node { + return &EmptyNode{} +} + +func (n *EmptyNode) Cid() cid.Cid { + id, err := cid.V1Builder{ + Codec: cid.DagProtobuf, + MhType: mh.SHA2_256, + MhLength: 0, // default length + }.Sum(nil) + + if err != nil { + panic("failed to create an empty cid!") + } + return id +} + +func (n *EmptyNode) Links() []*ipld.Link { + return nil +} + +func (n *EmptyNode) Loggable() map[string]any { + return nil +} + +func (n *EmptyNode) String() string { + return "[]" +} + +func (n *EmptyNode) RawData() []byte { + return nil +} + +func (n *EmptyNode) Size() (uint64, error) { + return 0, nil +} + +func (n *EmptyNode) Stat() (*ipld.NodeStat, error) { + return &ipld.NodeStat{}, nil +} + +func createCID(doc *client.Document) (cid.Cid, error) { + pref := cid.V1Builder{ + Codec: cid.DagProtobuf, + MhType: mh.SHA2_256, + MhLength: 0, // default length + } + + buf, err := doc.Bytes() + if err != nil { + return cid.Cid{}, err + } + + // And then feed it some data + c, err := pref.Sum(buf) + if err != nil { + return cid.Cid{}, err + } + return c, nil +} + +const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" + +func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + cfg := config.DefaultConfig() + cfg.Net.P2PAddress = randomMultiaddr + cfg.Net.RPCAddress = "0.0.0.0:0" + cfg.Net.TCPAddress = randomMultiaddr + + n, err := NewNode( + ctx, + db, + WithConfig(cfg), + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + return db, n +} + +func TestNewPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + h, err := libp2p.New() + require.NoError(t, err) + + _, err = NewPeer(ctx, db, h, nil, nil, nil, nil, nil) + require.NoError(t, err) +} + +func TestNewPeer_NoDB_NilDBError(t *testing.T) { + ctx := context.Background() + + h, err := libp2p.New() + require.NoError(t, err) + + _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil, nil) + require.ErrorIs(t, err, ErrNilDB) +} + +func 
TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + h, err := libp2p.New() + require.NoError(t, err) + + ps, err := pubsub.NewGossipSub( + ctx, + h, + pubsub.WithPeerExchange(true), + pubsub.WithFloodPublish(true), + ) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, ps, h.ID(), doc.Key().String(), true) + require.NoError(t, err) + + _, err = NewPeer(ctx, db, h, nil, ps, nil, nil, nil) + require.ErrorContains(t, err, "topic already exists") +} + +func TestStartAndClose_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + err := n.Start() + require.NoError(t, err) + + db.Close(ctx) +} + +func TestStart_WithKnownPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db1, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + store2 := memory.NewDatastore(ctx) + db2, err := db.NewDB(ctx, store2, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db1, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db2, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + + err = n2.Start() + require.NoError(t, err) + + db1.Close(ctx) + db2.Close(ctx) +} + +func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db1, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + store2 := memory.NewDatastore(ctx) + db2, err := db.NewDB(ctx, store2, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db1, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db2, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + err = n1.Close() + require.NoError(t, err) + + // give time for n1 to close + time.Sleep(100 * time.Millisecond) + + err = n2.Start() + require.NoError(t, err) + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 
1 log line but got %d lines", len(logLines)) + } + assert.Equal(t, "Failure while reconnecting to a known peer", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") + + db1.Close(ctx) + db2.Close(ctx) +} + +func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store) + require.NoError(t, err) + + n, err := NewNode( + ctx, + db, + WithPubSub(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + err = n.Start() + require.ErrorIs(t, err, ErrNilUpdateChannel) + + db.Close(ctx) +} + +func TestStart_WithClosedUpdateChannel_ClosedChannelError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n, err := NewNode( + ctx, + db, + WithPubSub(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + db.Events().Updates.Value().Close() + + err = n.Start() + require.ErrorContains(t, err, "cannot subscribe to a closed channel") + + db.Close(ctx) +} + +func TestRegisterNewDocument_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaID()) + require.NoError(t, err) +} + +func TestRegisterNewDocument_RPCTopicAlreadyRegisteredError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaID()) + require.Equal(t, err.Error(), "creating topic: joining topic: topic already exists") +} + +func TestSetReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + Collections: []string{"User"}, + }, + ) + require.NoError(t, err) +} + +func TestSetReplicator_WithInvalidAddress_InvalidArgumentError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: []byte("/some/invalid/address"), + Collections: []string{"User"}, + },
+ ) + require.ErrorContains(t, err, "InvalidArgument") +} + +func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") +} + +func TestSetReplicator_ForAllCollections_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) +} + +func TestPushToReplicator_SingleDocumentNoPeer_FailedToReplicateLogError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + keysCh, err := col.GetAllDocKeys(ctx) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + n.pushToReplicator(ctx, txn, col, keysCh, n.PeerID()) + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) + } + assert.Equal(t, "Failed to replicate log", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + _, err := n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestDeleteReplicator_WithTargetSelf_SelfTargetForReplicatorError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, err := n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorIs(t, err, ErrSelfTargetForReplicator) +} + +func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, n2 := newTestNode(ctx, t) + + _, err := n.Peer.DeleteReplicator( + ctx, + 
&pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") +} + +func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + _, err = n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + }, + ) + require.NoError(t, err) +} + +func TestDeleteReplicator_WithNoCollection_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, n2 := newTestNode(ctx, t) + + _, err := n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + }, + ) + require.NoError(t, err) +} + +func TestDeleteReplicator_WithNotSetReplicator_KeyNotFoundError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + _, err = n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "datastore: key not found") +} + +func TestGetAllReplicator_WithReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + reps, err := n.Peer.GetAllReplicators( + ctx, + &pb.GetAllReplicatorRequest{}, + ) + require.NoError(t, err) + + info, err := peer.AddrInfoFromP2pAddr(addr) + require.NoError(t, err) + + id, err := info.ID.MarshalBinary() + require.NoError(t, err) + + require.Equal(t, id, reps.Replicators[0].Info.Id) +} + +func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + _, err := n.Peer.GetAllReplicators( + ctx, + &pb.GetAllReplicatorRequest{}, + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestLoadReplicators_WithDBClosed_DatastoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + err := n.Peer.loadReplicators(ctx) + require.ErrorContains(t, err, "datastore closed") +} + +func TestLoadReplicator_WithReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + 
require.NoError(t, err) + + err = n.Peer.loadReplicators(ctx) + require.NoError(t, err) +} + +func TestLoadReplicator_WithReplicatorAndEmptyReplicatorMap_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + n.replicators = make(map[string]map[peer.ID]struct{}) + + err = n.Peer.loadReplicators(ctx) + require.NoError(t, err) +} + +func TestAddP2PCollections_WithInvalidCollectionID_NotFoundError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, err := n.Peer.AddP2PCollections( + ctx, + &pb.AddP2PCollectionsRequest{ + Collections: []string{"invalid_collection"}, + }, + ) + require.Error(t, err, ds.ErrNotFound) +} + +func TestAddP2PCollections_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + _, err = n.Peer.AddP2PCollections( + ctx, + &pb.AddP2PCollectionsRequest{ + Collections: []string{col.SchemaID()}, + }, + ) + require.NoError(t, err) +} + +func TestRemoveP2PCollectionsWithInvalidCollectionID(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, err := n.Peer.RemoveP2PCollections( + ctx, + &pb.RemoveP2PCollectionsRequest{ + Collections: []string{"invalid_collection"}, + }, + ) + require.Error(t, err, ds.ErrNotFound) +} + +func TestRemoveP2PCollections(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + _, err = n.Peer.RemoveP2PCollections( + ctx, + &pb.RemoveP2PCollectionsRequest{ + Collections: []string{col.SchemaID()}, + }, + ) + require.NoError(t, err) +} + +func TestGetAllP2PCollectionsWithNoCollections(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + cols, err := n.Peer.GetAllP2PCollections( + ctx, + &pb.GetAllP2PCollectionsRequest{}, + ) + require.NoError(t, err) + require.Len(t, cols.Collections, 0) +} + +func TestGetAllP2PCollections(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + _, err = n.Peer.AddP2PCollections( + ctx, + &pb.AddP2PCollectionsRequest{ + Collections: []string{col.SchemaID()}, + }, + ) + require.NoError(t, err) + + cols, err := n.Peer.GetAllP2PCollections( + ctx, + &pb.GetAllP2PCollectionsRequest{}, + ) + require.NoError(t, err) + require.Equal(t, &pb.GetAllP2PCollectionsReply{ + Collections: []*pb.GetAllP2PCollectionsReply_Collection{{ + Id: col.SchemaID(), + Name: col.Name(), + }}, + }, cols) +} + +func TestHandleDocCreateLog_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := 
db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + err = n.handleDocCreateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + Priority: 0, + }) + require.NoError(t, err) +} + +func TestHandleDocCreateLog_WithInvalidDockey_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + err := n.handleDocCreateLog(events.Update{ + DocKey: "some-invalid-key", + }) + require.ErrorContains(t, err, "failed to get DocKey from broadcast message: selected encoding not supported") +} + +func TestHandleDocCreateLog_WithExistingTopic_TopicExistsError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + err = n.handleDocCreateLog(events.Update{ + DocKey: doc.Key().String(), + SchemaID: col.SchemaID(), + }) + require.ErrorContains(t, err, "topic already exists") +} + +func TestHandleDocUpdateLog_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + err = n.handleDocUpdateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + Priority: 0, + }) + require.NoError(t, err) +} + +func TestHandleDoUpdateLog_WithInvalidDockey_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + err := n.handleDocUpdateLog(events.Update{ + DocKey: "some-invalid-key", + }) + require.ErrorContains(t, err, "failed to get DocKey from broadcast message: selected encoding not supported") +} + +func TestHandleDocUpdateLog_WithExistingDockeyTopic_TopicExistsError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := 
&crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + err = n.handleDocUpdateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + }) + require.ErrorContains(t, err, "topic already exists") +} + +func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaID(), true) + require.NoError(t, err) + + err = n.handleDocUpdateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + }) + require.ErrorContains(t, err, "topic already exists") +} + +func TestPushLogToReplicator_WithReplicator_FailedPushingLogError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + n.pushLogToReplicators(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + }) +} + +func TestSession_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + ng := n.Session(ctx) + require.Implements(t, (*ipld.NodeGetter)(nil), ng) +} diff --git a/net/process.go b/net/process.go index fb40eed08f..35d735d6e5 100644 --- a/net/process.go +++ b/net/process.go @@ -17,6 +17,7 @@ import ( "fmt" "sync" + dag "github.com/ipfs/boxo/ipld/merkledag" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" @@ -38,7 +39,7 @@ func (p *Peer) processLog( ctx context.Context, txn datastore.Txn, col client.Collection, - dockey core.DataStoreKey, + dsKey core.DataStoreKey, c cid.Cid, field string, nd ipld.Node, @@ -47,7 +48,7 @@ func (p *Peer) processLog( ) ([]cid.Cid, error) { log.Debug(ctx, "Running processLog") - crdt, err := 
initCRDTForType(ctx, txn, col, dockey, field) + crdt, err := initCRDTForType(ctx, txn, col, dsKey, field) if err != nil { return nil, err } @@ -60,7 +61,7 @@ func (p *Peer) processLog( log.Debug( ctx, "Processing PushLog request", - logging.NewKV("DocKey", dockey), + logging.NewKV("Datastore key", dsKey), logging.NewKV("CID", c), ) @@ -86,7 +87,7 @@ func initCRDTForType( ctx context.Context, txn datastore.MultiStore, col client.Collection, - docKey core.DataStoreKey, + dsKey core.DataStoreKey, field string, ) (crdt.MerkleCRDT, error) { var key core.DataStoreKey @@ -97,18 +98,18 @@ func initCRDTForType( key = base.MakeCollectionKey( description, ).WithInstanceInfo( - docKey, + dsKey, ).WithFieldId( core.COMPOSITE_NAMESPACE, ) } else { - fd, ok := description.GetField(field) + fd, ok := description.Schema.GetField(field) if !ok { - return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, docKey)) + return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey)) } ctype = fd.Typ fieldID := fd.ID.String() - key = base.MakeCollectionKey(description).WithInstanceInfo(docKey).WithFieldId(fieldID) + key = base.MakeCollectionKey(description).WithInstanceInfo(dsKey).WithFieldId(fieldID) } log.Debug(ctx, "Got CRDT Type", logging.NewKV("CType", ctype), logging.NewKV("Field", field)) return crdt.DefaultFactory.InstanceWithStores( @@ -126,7 +127,7 @@ func decodeBlockBuffer(buf []byte, cid cid.Cid) (ipld.Node, error) { if err != nil { return nil, errors.Wrap("failed to create block", err) } - return ipld.Decode(blk) + return ipld.Decode(blk, dag.DecodeProtobufBlock) } func (p *Peer) createNodeGetter( @@ -143,7 +144,7 @@ func (p *Peer) handleChildBlocks( session *sync.WaitGroup, txn datastore.Txn, col client.Collection, - dockey core.DataStoreKey, + dsKey core.DataStoreKey, field string, nd ipld.Node, children []cid.Cid, @@ -187,14 +188,14 @@ func (p *Peer) handleChildBlocks( ctx, "Submitting new job to DAG queue", logging.NewKV("Collection", col.Name()), - logging.NewKV("DocKey", dockey), + logging.NewKV("Datastore key", dsKey), logging.NewKV("Field", fieldName), logging.NewKV("CID", cNode.Cid())) session.Add(1) job := &dagJob{ collection: col, - dockey: dockey, + dsKey: dsKey, fieldName: fieldName, session: session, nodeGetter: getter, diff --git a/net/server.go b/net/server.go index e04f6eb940..ad1fd2fb29 100644 --- a/net/server.go +++ b/net/server.go @@ -17,7 +17,7 @@ import ( "fmt" "sync" - "github.com/gogo/protobuf/proto" + "github.com/ipfs/go-cid" format "github.com/ipfs/go-ipld-format" "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" grpcpeer "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -56,6 +57,8 @@ type server struct { // This is used to prevent multiple concurrent processing of the same document and // limit unecessary transaction conflicts. 
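// The docQueue field added to the server struct below serializes work per
// document key, so two PushLog requests for the same document never race.
// A hedged sketch of the intended add/done semantics, reconstructed from how
// TestDocQueue in the new server_test.go drives it (fields assumed: docs
// map[string]chan struct{} guarded by mu sync.Mutex; the actual
// implementation in this file may differ):

func (dq *docQueue) add(docKey string) {
	dq.mu.Lock()
	done, alreadyActive := dq.docs[docKey]
	if !alreadyActive {
		dq.docs[docKey] = make(chan struct{})
	}
	dq.mu.Unlock()
	if alreadyActive {
		<-done         // wait for the current holder to call done
		dq.add(docKey) // then compete for the key again
	}
}

func (dq *docQueue) done(docKey string) {
	dq.mu.Lock()
	defer dq.mu.Unlock()
	if done, ok := dq.docs[docKey]; ok {
		delete(dq.docs, docKey)
		close(done)
	}
}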
docQueue *docQueue + + pb.UnimplementedServiceServer } // pubsubTopic is a wrapper of rpc.Topic to be able to track if the topic has @@ -198,12 +201,18 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } log.Debug(ctx, "Received a PushLog request", logging.NewKV("PeerID", pid)) - // parse request object - cid := req.Body.Cid.Cid + cid, err := cid.Cast(req.Body.Cid) + if err != nil { + return nil, err + } + dockey, err := client.NewDocKeyFromString(string(req.Body.DocKey)) + if err != nil { + return nil, err + } - s.docQueue.add(req.Body.DocKey.String()) + s.docQueue.add(dockey.String()) defer func() { - s.docQueue.done(req.Body.DocKey.String()) + s.docQueue.done(dockey.String()) if s.pushLogEmitter != nil { byPeer, err := libpeer.Decode(req.Body.Creator) if err != nil { @@ -238,7 +247,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } schemaID := string(req.Body.SchemaID) - docKey := core.DataStoreKeyFromDocKey(req.Body.DocKey.DocKey) + docKey := core.DataStoreKeyFromDocKey(dockey) var txnErr error for retry := 0; retry < s.peer.db.MaxTxnRetries(); retry++ { @@ -416,7 +425,7 @@ func (s *server) publishLog(ctx context.Context, topic string, req *pb.PushLogRe return s.publishLog(ctx, topic, req) } - data, err := req.Marshal() + data, err := req.MarshalVT() if err != nil { return errors.Wrap("failed marshling pubsub message", err) } @@ -424,10 +433,16 @@ func (s *server) publishLog(ctx context.Context, topic string, req *pb.PushLogRe if _, err := t.Publish(ctx, data, rpc.WithIgnoreResponse(true)); err != nil { return errors.Wrap(fmt.Sprintf("failed publishing to thread %s", topic), err) } + + cid, err := cid.Cast(req.Body.Cid) + if err != nil { + return err + } + log.Debug( ctx, "Published log", - logging.NewKV("CID", req.Body.Cid.Cid), + logging.NewKV("CID", cid), logging.NewKV("DocKey", topic), ) return nil diff --git a/net/server_test.go b/net/server_test.go new file mode 100644 index 0000000000..993c12d875 --- /dev/null +++ b/net/server_test.go @@ -0,0 +1,328 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package net + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + rpc "github.com/textileio/go-libp2p-pubsub-rpc" + grpcpeer "google.golang.org/grpc/peer" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" + net_pb "github.com/sourcenetwork/defradb/net/pb" +) + +func TestNewServerSimple(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + _, err := newServer(n.Peer, db) + require.NoError(t, err) +} + +func TestNewServerWithDBClosed(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + db.Close(ctx) + _, err := newServer(n.Peer, db) + require.ErrorIs(t, err, memory.ErrClosed) +} + +var mockError = errors.New("mock error") + +type mockDBColError struct { + client.DB +} + +func (mDB *mockDBColError) GetAllCollections(context.Context) ([]client.Collection, error) { + return nil, mockError +} + +func TestNewServerWithGetAllCollectionError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + mDB := mockDBColError{db} + _, err := newServer(n.Peer, &mDB) + require.ErrorIs(t, err, mockError) +} + +func TestNewServerWithCollectionSubscribed(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = n.AddP2PCollection(ctx, col.SchemaID()) + require.NoError(t, err) + + _, err = newServer(n.Peer, db) + require.NoError(t, err) +} + +type mockDBDockeysError struct { + client.DB +} + +func (mDB *mockDBDockeysError) GetAllCollections(context.Context) ([]client.Collection, error) { + return []client.Collection{ + &mockCollection{}, + }, nil +} + +type mockCollection struct { + client.Collection +} + +func (mCol *mockCollection) SchemaID() string { + return "mockColID" +} +func (mCol *mockCollection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { + return nil, mockError +} + +func TestNewServerWithGetAllDockeysError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + mDB := mockDBDockeysError{db} + + _, err = newServer(n.Peer, &mDB) + require.ErrorIs(t, err, mockError) +} + +func TestNewServerWithAddTopicError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + _, err = newServer(n.Peer, db) + require.ErrorContains(t, err, "topic already exists") +} + +type mockHost struct { + host.Host +} + +func (mH *mockHost) EventBus() event.Bus { + return &mockBus{} +} + +type mockBus struct { + event.Bus +} + +func (mB *mockBus) Emitter(eventType any, opts 
...event.EmitterOpt) (event.Emitter, error) { + return nil, mockError +} + +func (mB *mockBus) Subscribe(eventType any, opts ...event.SubscriptionOpt) (event.Subscription, error) { + return nil, mockError +} + +func TestNewServerWithEmitterError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + n.Peer.host = &mockHost{n.Peer.host} + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + _, err = newServer(n.Peer, db) + require.NoError(t, err) + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 2 { + t.Fatalf("expecting exactly 2 log lines but got %d lines", len(logLines)) + } + assert.Equal(t, "could not create event emitter", logLines[0]["msg"]) + assert.Equal(t, "could not create event emitter", logLines[1]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func parseLines(r io.Reader) ([]map[string]any, error) { + fileScanner := bufio.NewScanner(r) + + fileScanner.Split(bufio.ScanLines) + + logLines := []map[string]any{} + for fileScanner.Scan() { + loggedLine := make(map[string]any) + err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) + if err != nil { + return nil, err + } + logLines = append(logLines, loggedLine) + } + + return logLines, nil +} + +func TestGetDocGraph(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.GetDocGraph(ctx, &net_pb.GetDocGraphRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestPushDocGraph(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.PushDocGraph(ctx, &net_pb.PushDocGraphRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestGetLog(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.GetLog(ctx, &net_pb.GetLogRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestGetHeadLog(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.GetHeadLog(ctx, &net_pb.GetHeadLogRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestDocQueue(t *testing.T) { + q := docQueue{ + docs: make(map[string]chan struct{}), + } + + testKey := "test" + + q.add(testKey) + go q.add(testKey) + // give time for the goroutine to block + time.Sleep(10 * time.Millisecond) + require.Len(t, q.docs, 1) + q.done(testKey) + // give time for the goroutine to add the key + time.Sleep(10 * time.Millisecond) + q.mu.Lock() + require.Len(t, q.docs, 1) + q.mu.Unlock() + q.done(testKey) + q.mu.Lock() + require.Len(t, q.docs, 0) + q.mu.Unlock() +} + +func TestPushLog(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + ctx = grpcpeer.NewContext(ctx, &grpcpeer.Peer{ + Addr: addr{n.PeerID()}, + }) + + block := &EmptyNode{} + + _, err = 
n.server.PushLog(ctx, &net_pb.PushLogRequest{ + Body: &net_pb.PushLogRequest_Body{ + DocKey: []byte(doc.Key().String()), + Cid: cid.Bytes(), + SchemaID: []byte(col.SchemaID()), + Creator: n.PeerID().String(), + Log: &net_pb.Document_Log{ + Block: block.RawData(), + }, + }, + }) + require.NoError(t, err) +} diff --git a/node/node_test.go b/node/node_test.go deleted file mode 100644 index 0a8c48c8fd..0000000000 --- a/node/node_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package node - -import ( - "context" - "testing" - - badger "github.com/dgraph-io/badger/v3" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/client" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" - "github.com/sourcenetwork/defradb/db" - netutils "github.com/sourcenetwork/defradb/net/utils" -) - -// Node.Boostrap is not tested because the underlying, *ipfslite.Peer.Bootstrap is a best-effort function. - -func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { - var database client.DB - var options []db.Option - ctx := context.Background() - options = append(options, db.WithUpdateEvents()) - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - assert.NoError(t, err) - database, err = db.NewDB(ctx, rootstore, options...) - assert.NoError(t, err) - return database -} - -func TestNewNode(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - _, err := NewNode( - context.Background(), - db, - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) -} - -func TestNewNodeNoPubSub(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - WithPubSub(false), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - assert.Nil(t, n.pubsub) -} - -func TestNewNodeWithPubSub(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n, err := NewNode( - ctx, - db, - WithPubSub(true), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - // overly simple check of validity of pubsub, avoiding the process of creating a PubSub - assert.NotNil(t, n.pubsub) -} - -func TestNewNodeWithPubSubFailsWithoutDataPath(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - _, err := NewNode( - ctx, - db, - WithPubSub(true), - ) - assert.EqualError(t, err, "1 error occurred:\n\t* mkdir : no such file or directory\n\n") -} - -func TestNodeClose(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - err = n.Close() - assert.NoError(t, err) -} - -func TestNewNodeBootstrapWithNoPeer(t *testing.T) { - db := 
FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n1, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - n1.Boostrap([]peer.AddrInfo{}) -} - -func TestNewNodeBootstrapWithOnePeer(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n1, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - n2, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) - if err != nil { - t.Fatal(err) - } - n2.Boostrap(addrs) -} - -func TestNewNodeBootstrapWithOneValidPeerAndManyInvalidPeers(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n1, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - n2, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - addrs, err := netutils.ParsePeers([]string{ - n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String(), - "/ip4/0.0.0.0/tcp/1234/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci6", - "/ip4/0.0.0.0/tcp/1235/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci5", - "/ip4/0.0.0.0/tcp/1236/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4", - }) - if err != nil { - t.Fatal(err) - } - n2.Boostrap(addrs) -} - -func mergeOptions(nodeOpts ...NodeOpt) (Options, error) { - var options Options - var nodeOpt NodeOpt - for _, opt := range append(nodeOpts, nodeOpt) { - if opt == nil { - continue - } - if err := opt(&options); err != nil { - return options, err - } - } - return options, nil -} - -func TestInvalidListenTCPAddrString(t *testing.T) { - opt := ListenTCPAddrString("/ip4/碎片整理") - options, err := mergeOptions(opt) - assert.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") - assert.Equal(t, Options{}, options) -} diff --git a/planner/average.go b/planner/average.go index aacadab2ca..9de120ed98 100644 --- a/planner/average.go +++ b/planner/average.go @@ -56,7 +56,7 @@ func (p *Planner) Average( sumFieldIndex: sumField.Index, countFieldIndex: countField.Index, virtualFieldIndex: field.Index, - docMapper: docMapper{&field.DocumentMapping}, + docMapper: docMapper{field.DocumentMapping}, }, nil } diff --git a/planner/commit.go b/planner/commit.go index fca9c62865..e6216e2b43 100644 --- a/planner/commit.go +++ b/planner/commit.go @@ -53,7 +53,7 @@ func (p *Planner) DAGScan(commitSelect *mapper.CommitSelect) *dagScanNode { visitedNodes: make(map[string]bool), queuedCids: []*cid.Cid{}, commitSelect: commitSelect, - docMapper: docMapper{&commitSelect.DocumentMapping}, + docMapper: docMapper{commitSelect.DocumentMapping}, } } @@ -333,7 +333,7 @@ 
func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L return core.Doc{}, nil, err } - field, ok := c.Description().GetField(fieldName.(string)) + field, ok := c.Description().Schema.GetField(fieldName.(string)) if !ok { return core.Doc{}, nil, client.NewErrFieldNotExist(fieldName.(string)) } diff --git a/planner/count.go b/planner/count.go index 28222f11c7..a3eddf0fbc 100644 --- a/planner/count.go +++ b/planner/count.go @@ -48,7 +48,7 @@ func (p *Planner) Count(field *mapper.Aggregate, host *mapper.Select) (*countNod p: p, virtualFieldIndex: field.Index, aggregateMapping: field.AggregateTargets, - docMapper: docMapper{&field.DocumentMapping}, + docMapper: docMapper{field.DocumentMapping}, }, nil } @@ -75,10 +75,22 @@ func (n *countNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if source.Filter == nil || source.Filter.ExternalConditions == nil { + if source.Filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = source.Filter.ExternalConditions + // Get the target aggregate document mapping, since the filters + // are relative to the target aggregate collection (and doc mapper). + // + // We can determine that there is a child mapping when the index from the + // aggregate target is set (non-nil) on the childMappings. + var targetMap *core.DocumentMapping + if source.Index < len(n.documentMapping.ChildMappings) && + n.documentMapping.ChildMappings[source.Index] != nil { + targetMap = n.documentMapping.ChildMappings[source.Index] + } else { + targetMap = n.documentMapping + } + simpleExplainMap[filterLabel] = source.Filter.ToMap(targetMap) } // Add the main field name. diff --git a/planner/create.go b/planner/create.go index 291c723300..618591ccfe 100644 --- a/planner/create.go +++ b/planner/create.go @@ -88,9 +88,13 @@ func (n *createNode) Next() (bool, error) { currentValue.SetKey(n.doc.Key().String()) for i, value := range n.doc.Values() { - // On create the document will have no aliased fields/aggregates/etc so we can safely take - // the first index. 
- n.documentMapping.SetFirstOfName(¤tValue, i.Name(), value.Value()) + if len(n.documentMapping.IndexesByName[i.Name()]) > 0 { + n.documentMapping.SetFirstOfName(¤tValue, i.Name(), value.Value()) + } else if aliasName := i.Name() + request.RelatedObjectID; len(n.documentMapping.IndexesByName[aliasName]) > 0 { + n.documentMapping.SetFirstOfName(¤tValue, aliasName, value.Value()) + } else { + return false, client.NewErrFieldNotExist(i.Name()) + } } n.returned = true @@ -171,7 +175,7 @@ func (p *Planner) CreateDoc(parsed *mapper.Mutation) (planNode, error) { p: p, newDocStr: parsed.Data, results: results, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, } // get collection diff --git a/planner/datasource.go b/planner/datasource.go index 2dea8290c5..afcfbab3ce 100644 --- a/planner/datasource.go +++ b/planner/datasource.go @@ -35,15 +35,13 @@ func (p *Planner) getSource(parsed *mapper.Select) (planSource, error) { return p.getCollectionScanPlan(parsed) } -// @todo: Add field selection func (p *Planner) getCollectionScanPlan(parsed *mapper.Select) (planSource, error) { colDesc, err := p.getCollectionDesc(parsed.CollectionName) if err != nil { return planSource{}, err } - scan := p.Scan(parsed) - err = scan.initCollection(colDesc) + scan, err := p.Scan(parsed) if err != nil { return planSource{}, err } diff --git a/planner/delete.go b/planner/delete.go index ef79463302..de59cf30b7 100644 --- a/planner/delete.go +++ b/planner/delete.go @@ -91,10 +91,10 @@ func (n *deleteNode) simpleExplain() (map[string]any, error) { simpleExplainMap[idsLabel] = n.ids // Add the filter attribute if it exists, otherwise have it nil. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) } return simpleExplainMap, nil @@ -134,6 +134,6 @@ func (p *Planner) DeleteDocs(parsed *mapper.Mutation) (planNode, error) { ids: parsed.DocKeys.Value(), collection: col.WithTxn(p.txn), source: slctNode, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, }, nil } diff --git a/planner/errors.go b/planner/errors.go index fbe3f89f78..c4856178f3 100644 --- a/planner/errors.go +++ b/planner/errors.go @@ -16,6 +16,7 @@ const ( errUnknownDependency string = "given field does not exist" errFailedToClosePlan string = "failed to close the plan" errFailedToCollectExecExplainInfo string = "failed to collect execution explain information" + errSubTypeInit string = "sub-type initialization error at scan node reset" ) var ( @@ -33,6 +34,7 @@ var ( ErrMissingChildValue = errors.New("expected child value, however none was yielded") ErrUnknownRelationType = errors.New("failed sub selection, unknown relation type") ErrUnknownExplainRequestType = errors.New("can not explain request of unknown type") + ErrSubTypeInit = errors.New(errSubTypeInit) ErrFailedToCollectExecExplainInfo = errors.New(errFailedToCollectExecExplainInfo) ErrUnknownDependency = errors.New(errUnknownDependency) ) @@ -48,3 +50,7 @@ func NewErrFailedToClosePlan(inner error, location string) error { func NewErrFailedToCollectExecExplainInfo(inner error) error { return errors.Wrap(errFailedToCollectExecExplainInfo, inner) } + +func NewErrSubTypeInit(inner error) error { + return errors.Wrap(errSubTypeInit, inner) +} diff --git a/planner/explain.go b/planner/explain.go index 
f4494fcf72..560063b4ba 100644 --- a/planner/explain.go +++ b/planner/explain.go @@ -57,12 +57,113 @@ const ( fieldNameLabel = "fieldName" filterLabel = "filter" idsLabel = "ids" + joinRootLabel = "root" + joinSubTypeLabel = "subType" + keysLabel = "_keys" limitLabel = "limit" offsetLabel = "offset" sourcesLabel = "sources" spansLabel = "spans" ) +// buildDebugExplainGraph dumps the entire plan graph as is, with all the plan nodes. +// +// Note: This also includes plan nodes that aren't "explainable". +func buildDebugExplainGraph(source planNode) (map[string]any, error) { + explainGraph := map[string]any{} + + if source == nil { + return explainGraph, nil + } + + switch node := source.(type) { + // Walk the multiple children if it is a MultiNode. + case MultiNode: + multiChildExplainGraph := []map[string]any{} + for _, childSource := range node.Children() { + childExplainGraph, err := buildDebugExplainGraph(childSource) + if err != nil { + return nil, err + } + multiChildExplainGraph = append(multiChildExplainGraph, childExplainGraph) + } + nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = multiChildExplainGraph + + case *typeJoinMany: + var explainGraphBuilder = map[string]any{} + + // If root is not the last child then keep walking and explaining the root graph. + if node.root != nil { + indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.root) + if err != nil { + return nil, err + } + // Add the explanation of the rest of the explain graph under the "root" graph. + explainGraphBuilder[joinRootLabel] = indexJoinRootExplainGraph + } + + if node.subType != nil { + indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.subType) + if err != nil { + return nil, err + } + // Add the explanation of the rest of the explain graph under the "subType" graph. + explainGraphBuilder[joinSubTypeLabel] = indexJoinSubTypeExplainGraph + } + + nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = explainGraphBuilder + + case *typeJoinOne: + var explainGraphBuilder = map[string]any{} + + // If root is not the last child then keep walking and explaining the root graph. + if node.root != nil { + indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.root) + if err != nil { + return nil, err + } + // Add the explanation of the rest of the explain graph under the "root" graph. + explainGraphBuilder[joinRootLabel] = indexJoinRootExplainGraph + } else { + explainGraphBuilder[joinRootLabel] = nil + } + + if node.subType != nil { + indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.subType) + if err != nil { + return nil, err + } + // Add the explanation of the rest of the explain graph under the "subType" graph. + explainGraphBuilder[joinSubTypeLabel] = indexJoinSubTypeExplainGraph + } else { + explainGraphBuilder[joinSubTypeLabel] = nil + } + + nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = explainGraphBuilder + + default: + var explainGraphBuilder = map[string]any{} + + // If not the last child then keep walking the graph to find more plan nodes. + // Also make sure the next source / child isn't a recursive `topLevelNode`. + if next := node.Source(); next != nil && next.Kind() != topLevelNodeKind { + var err error + explainGraphBuilder, err = buildDebugExplainGraph(next) + if err != nil { + return nil, err + } + } + // Add the graph of the next node under current node. 
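// To make the recursion concrete: a plan graph of selectTopNode ->
// selectNode -> scanNode would come back from buildDebugExplainGraph shaped
// like the hedged example below (node names are illustrative lowerCamel
// Kind() values; the real set depends on the request being explained):
//
//	map[string]any{
//		"selectTopNode": map[string]any{
//			"selectNode": map[string]any{
//				"scanNode": map[string]any{},
//			},
//		},
//	}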
+ nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = explainGraphBuilder + } + + return explainGraph, nil +} + // buildSimpleExplainGraph builds the explainGraph from the given top level plan. // // Request: @@ -134,7 +235,7 @@ func buildSimpleExplainGraph(source planNode) (map[string]any, error) { return nil, err } // Add the explaination of the rest of the explain graph under the "root" graph. - indexJoinGraph["root"] = indexJoinRootExplainGraph + indexJoinGraph[joinRootLabel] = indexJoinRootExplainGraph } // Add this restructured typeIndexJoin explain graph. explainGraph[strcase.ToLowerCamel(node.Kind())] = indexJoinGraph @@ -345,6 +446,22 @@ func (p *Planner) explainRequest( return explainResult, nil + case request.DebugExplain: + // walks through the plan graph, and outputs the concrete planNodes that should + // be executed, maintaining their order in the plan graph (does not actually execute them). + explainGraph, err := buildDebugExplainGraph(plan) + if err != nil { + return nil, err + } + + explainResult := []map[string]any{ + { + request.ExplainLabel: explainGraph, + }, + } + + return explainResult, nil + case request.ExecuteExplain: return p.executeAndExplainRequest(ctx, plan) diff --git a/planner/group.go b/planner/group.go index e87d753d14..0890b13d84 100644 --- a/planner/group.go +++ b/planner/group.go @@ -91,7 +91,7 @@ func (p *Planner) GroupBy(n *mapper.GroupBy, parsed *mapper.Select, childSelects childSelects: childSelects, groupByFields: n.Fields, dataSources: dataSources, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, } return &groupNodeObj, nil } @@ -236,10 +236,10 @@ func (n *groupNode) simpleExplain() (map[string]any, error) { childExplainGraph["docKeys"] = nil } - if c.Filter == nil || c.Filter.ExternalConditions == nil { + if c.Filter == nil { childExplainGraph[filterLabel] = nil } else { - childExplainGraph[filterLabel] = c.Filter.ExternalConditions + childExplainGraph[filterLabel] = c.Filter.ToMap(n.documentMapping) } if c.Limit != nil { diff --git a/planner/limit.go b/planner/limit.go index d3c2954d9b..979bc50c02 100644 --- a/planner/limit.go +++ b/planner/limit.go @@ -46,7 +46,7 @@ func (p *Planner) Limit(parsed *mapper.Select, n *mapper.Limit) (*limitNode, err limit: n.Limit, offset: n.Offset, rowIndex: 0, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, }, nil } diff --git a/planner/mapper/aggregate.go b/planner/mapper/aggregate.go index 4d7671914e..ceed03448e 100644 --- a/planner/mapper/aggregate.go +++ b/planner/mapper/aggregate.go @@ -45,7 +45,7 @@ type AggregateTarget struct { type Aggregate struct { Field // The mapping of this aggregate's parent/host. - core.DocumentMapping + *core.DocumentMapping // The collection of targets that this aggregate will aggregate. 
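+	// (each target may carry its own filter, limit, and order)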
AggregateTargets []AggregateTarget diff --git a/planner/mapper/errors.go b/planner/mapper/errors.go index 83a5c11b3a..552021ca94 100644 --- a/planner/mapper/errors.go +++ b/planner/mapper/errors.go @@ -12,8 +12,18 @@ package mapper import "github.com/sourcenetwork/defradb/errors" +const ( + errInvalidFieldToGroupBy string = "invalid field value to groupBy" +) + var ( ErrUnableToIdAggregateChild = errors.New("unable to identify aggregate child") ErrAggregateTargetMissing = errors.New("aggregate must be provided with a property to aggregate") ErrFailedToFindHostField = errors.New("failed to find host field") + ErrInvalidFieldIndex = errors.New("given field doesn't have any indexes") + ErrMissingSelect = errors.New("missing target select field") ) + +func NewErrInvalidFieldToGroupBy(field string) error { + return errors.New(errInvalidFieldToGroupBy, errors.NewKV("Field", field)) +} diff --git a/planner/mapper/mapper.go b/planner/mapper/mapper.go index a2a8250f4a..5b823f6ec2 100644 --- a/planner/mapper/mapper.go +++ b/planner/mapper/mapper.go @@ -25,6 +25,10 @@ import ( "github.com/sourcenetwork/defradb/datastore" ) +var ( + FilterEqOp = &Operator{Operation: "_eq"} +) + // ToSelect converts the given [parser.Select] into a [Select]. // // In the process of doing so it will construct the document map required to access the data @@ -69,8 +73,9 @@ func toSelect( fields = append(fields, filterDependencies...) // Resolve order dependencies that may have been missed due to not being rendered. - if err := resolveOrderDependencies( - descriptionsRepo, collectionName, selectRequest.OrderBy, mapping, &fields); err != nil { + err = resolveOrderDependencies( + descriptionsRepo, collectionName, selectRequest.OrderBy, mapping, &fields) + if err != nil { return nil, err } @@ -83,12 +88,31 @@ func toSelect( desc, descriptionsRepo, ) + if err != nil { return nil, err } - // If there is a groupBy, and no inner group has been requested, we need to map the property here + // Resolve groupBy mappings i.e. alias remapping and handle missed inner group. if selectRequest.GroupBy.HasValue() { + groupByFields := selectRequest.GroupBy.Value().Fields + // Remap all alias field names to use their internal field name mappings. + for index, groupByField := range groupByFields { + fieldDesc, ok := desc.Schema.GetField(groupByField) + if ok && fieldDesc.IsObject() && !fieldDesc.IsObjectArray() { + groupByFields[index] = groupByField + request.RelatedObjectID + } else if ok && fieldDesc.IsObjectArray() { + return nil, NewErrInvalidFieldToGroupBy(groupByField) + } + } + + selectRequest.GroupBy = immutable.Some( + request.GroupBy{ + Fields: groupByFields, + }, + ) + + // If there is a groupBy, and no inner group has been requested, we need to map the property here if _, isGroupFieldMapped := mapping.IndexesByName[request.GroupFieldName]; !isGroupFieldMapped { index := mapping.GetNextIndex() mapping.Add(index, request.GroupFieldName) @@ -97,7 +121,7 @@ func toSelect( return &Select{ Targetable: toTargetable(thisIndex, selectRequest, mapping), - DocumentMapping: *mapping, + DocumentMapping: mapping, Cid: selectRequest.CID, CollectionName: collectionName, Fields: fields, @@ -117,38 +141,103 @@ func resolveOrderDependencies( return nil } + currentExistingFields := existingFields // If there is orderby, and any one of the condition fields that are join fields and have not been // requested, we need to map them here. 
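+	// e.g. an order of {author: {name: ASC}} must map the `author` child select
+	// (and include its `name` field) even when neither was requested for rendering.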
+outer:
 	for _, condition := range source.Value().Conditions {
-		if len(condition.Fields) <= 1 {
-			continue
-		}
-
-		joinField := condition.Fields[0]
+		fields := condition.Fields[:] // copy slice
+		for {
+			numFields := len(fields)
+			// <2 fields: Direct field on the root type: {age: DESC}
+			// 2 fields: Single depth related type: {author: {age: DESC}}
+			// >2 fields: Multi depth related type: {author: {friends: {age: DESC}}}
+			if numFields == 2 {
+				joinField := fields[0]
+
+				// ensure the child select is resolved for this order join
+				innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, currentExistingFields)
+				if err != nil {
+					return err
+				}
 
-		// Check if the join field is already mapped, if not then map it.
-		if isOrderJoinFieldMapped := len(mapping.IndexesByName[joinField]) != 0; !isOrderJoinFieldMapped {
-			index := mapping.GetNextIndex()
-			mapping.Add(index, joinField)
+				// make sure the actual target field inside the join field
+				// is included in the select
+				targetFieldName := fields[1]
+				targetField := &Field{
+					Index: innerSelect.FirstIndexOfName(targetFieldName),
+					Name:  targetFieldName,
+				}
+				innerSelect.Fields = append(innerSelect.Fields, targetField)
+				continue outer
+			} else if numFields > 2 {
+				joinField := fields[0]
 
-			// Resolve the inner child fields and get it's mapping.
-			dummyJoinFieldSelect := request.Select{
-				Field: request.Field{
-					Name: joinField,
-				},
-			}
-			innerSelect, err := toSelect(descriptionsRepo, index, &dummyJoinFieldSelect, descName)
-			if err != nil {
-				return err
+				// ensure the child select is resolved for this order join
+				innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, existingFields)
+				if err != nil {
+					return err
+				}
+				mapping = innerSelect.DocumentMapping
+				currentExistingFields = &innerSelect.Fields
+				fields = fields[1:] // chop off the front item, and loop again on inner
+			} else { // <= 1
+				targetFieldName := fields[0]
+				*existingFields = append(*existingFields, &Field{
+					Index: mapping.FirstIndexOfName(targetFieldName),
+					Name:  targetFieldName,
+				})
+				// nothing to do, continue the outer for loop
+				continue outer
 			}
-			*existingFields = append(*existingFields, innerSelect)
-			mapping.SetChildAt(index, &innerSelect.DocumentMapping)
 		}
 	}
 
 	return nil
 }
 
+// given a type join field, ensure its mapping exists
+// and add corresponding select field(s)
+func resolveChildOrder(
+	descriptionsRepo *DescriptionsRepo,
+	descName string,
+	orderChildField string,
+	mapping *core.DocumentMapping,
+	existingFields *[]Requestable,
+) (*Select, error) {
+	childFieldIndexes := mapping.IndexesByName[orderChildField]
+	// Check if the join field is already mapped, if not then map it.
+	if len(childFieldIndexes) == 0 {
+		index := mapping.GetNextIndex()
+		mapping.Add(index, orderChildField)
+
+		// Resolve the inner child fields and get its mapping.
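+		// (a dummy select stands in for the join field that was never explicitly requested)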
+ dummyJoinFieldSelect := request.Select{ + Field: request.Field{ + Name: orderChildField, + }, + } + innerSelect, err := toSelect(descriptionsRepo, index, &dummyJoinFieldSelect, descName) + if err != nil { + return nil, err + } + *existingFields = append(*existingFields, innerSelect) + mapping.SetChildAt(index, innerSelect.DocumentMapping) + return innerSelect, nil + } else { + for _, field := range *existingFields { + fieldSelect, ok := field.(*Select) + if !ok { + continue + } + if fieldSelect.Field.Name == orderChildField { + return fieldSelect, nil + } + } + } + return nil, ErrMissingSelect +} + // resolveAggregates figures out which fields the given aggregates are targeting // and converts the aggregateRequest into an Aggregate, appending it onto the given // fields slice. @@ -166,7 +255,6 @@ func resolveAggregates( ) ([]Requestable, error) { fields := inputFields dependenciesByParentId := map[int][]int{} - for _, aggregate := range aggregates { aggregateTargets := make([]AggregateTarget, len(aggregate.targets)) @@ -182,7 +270,7 @@ func resolveAggregates( var hasHost bool var convertedFilter *Filter if childIsMapped { - fieldDesc, isField := desc.GetField(target.hostExternalName) + fieldDesc, isField := desc.Schema.GetField(target.hostExternalName) if isField && !fieldDesc.IsObject() { var order *OrderBy if target.order.HasValue() && len(target.order.Value().Conditions) > 0 { @@ -205,14 +293,14 @@ func resolveAggregates( Index: int(fieldDesc.ID), Name: target.hostExternalName, }, - Filter: ToFilter(target.filter, mapping), + Filter: ToFilter(target.filter.Value(), mapping), Limit: target.limit, OrderBy: order, } } else { childObjectIndex := mapping.FirstIndexOfName(target.hostExternalName) childMapping := mapping.ChildMappings[childObjectIndex] - convertedFilter = ToFilter(target.filter, childMapping) + convertedFilter = ToFilter(target.filter.Value(), childMapping) host, hasHost = tryGetTarget( target.hostExternalName, convertedFilter, @@ -238,7 +326,6 @@ func resolveAggregates( if err != nil { return nil, err } - mapAggregateNestedTargets(target, hostSelectRequest, selectRequest.Root) childMapping, childDesc, err := getTopLevelInfo(descriptionsRepo, hostSelectRequest, childCollectionName) @@ -251,13 +338,19 @@ func resolveAggregates( return nil, err } + err = resolveOrderDependencies( + descriptionsRepo, childCollectionName, target.order, childMapping, &childFields) + if err != nil { + return nil, err + } + childMapping = childMapping.CloneWithoutRender() mapping.SetChildAt(index, childMapping) if !childIsMapped { // If the child was not mapped, the filter will not have been converted yet // so we must do that now. 
- convertedFilter = ToFilter(target.filter, mapping.ChildMappings[index]) + convertedFilter = ToFilter(target.filter.Value(), mapping.ChildMappings[index]) } dummyJoin := &Select{ @@ -271,7 +364,7 @@ func resolveAggregates( OrderBy: toOrderBy(target.order, childMapping), }, CollectionName: childCollectionName, - DocumentMapping: *childMapping, + DocumentMapping: childMapping, Fields: childFields, } @@ -308,6 +401,12 @@ func resolveAggregates( return nil, ErrUnableToIdAggregateChild } + // ensure target aggregate field is included in the type join + hostSelect.Fields = append(hostSelect.Fields, &Field{ + Index: hostSelect.DocumentMapping.FirstIndexOfName(target.childExternalName), + Name: target.childExternalName, + }) + childTarget = OptionalChildTarget{ // If there are multiple children of the same name there is no way // for us (or the consumer) to identify which one they are hoping for @@ -326,7 +425,7 @@ func resolveAggregates( newAggregate := Aggregate{ Field: aggregate.field, - DocumentMapping: *mapping, + DocumentMapping: mapping, AggregateTargets: aggregateTargets, } fields = append(fields, &newAggregate) @@ -514,7 +613,7 @@ func getRequestables( return nil, nil, err } fields = append(fields, innerSelect) - mapping.SetChildAt(index, &innerSelect.DocumentMapping) + mapping.SetChildAt(index, innerSelect.DocumentMapping) mapping.RenderKeys = append(mapping.RenderKeys, core.RenderKey{ Index: index, @@ -594,7 +693,7 @@ func getCollectionName( return "", err } - hostFieldDesc, parentHasField := parentDescription.GetField(selectRequest.Name) + hostFieldDesc, parentHasField := parentDescription.Schema.GetField(selectRequest.Name) if parentHasField && hostFieldDesc.RelationType != 0 { // If this field exists on the parent, and it is a child object // then this collection name is the collection name of the child. 
@@ -696,6 +795,7 @@ func resolveInnerFilterDependencies( ) ([]Requestable, error) { newFields := []Requestable{} +sourceLoop: for key := range source { if strings.HasPrefix(key, "_") && key != request.KeyFieldName { continue @@ -732,7 +832,7 @@ func resolveInnerFilterDependencies( }, }, CollectionName: childCollectionName, - DocumentMapping: *childMapping, + DocumentMapping: childMapping, } newFields = append(newFields, dummyJoin) @@ -743,7 +843,18 @@ func resolveInnerFilterDependencies( if keyIndex >= len(mapping.ChildMappings) { // If the key index is outside the bounds of the child mapping array, then - // this is not a relation/join and we can continue (no child props to process) + // this is not a relation/join and we can add it to the fields and + // continue (no child props to process) + for _, field := range existingFields { + if field.GetIndex() == keyIndex { + continue sourceLoop + } + } + newFields = append(existingFields, &Field{ + Index: keyIndex, + Name: key, + }) + continue } @@ -778,7 +889,7 @@ func resolveInnerFilterDependencies( enumerable.New(existingFields), ) - matchingFields := enumerable.Where(allFields, func(existingField Requestable) (bool, error) { + matchingFields := enumerable.Where[Requestable](allFields, func(existingField Requestable) (bool, error) { return existingField.GetIndex() == keyIndex, nil }) @@ -860,7 +971,7 @@ func toTargetable(index int, selectRequest *request.Select, docMap *core.Documen return Targetable{ Field: toField(index, selectRequest), DocKeys: selectRequest.DocKeys, - Filter: ToFilter(selectRequest.Filter, docMap), + Filter: ToFilter(selectRequest.Filter.Value(), docMap), Limit: toLimit(selectRequest.Limit, selectRequest.Offset), GroupBy: toGroupBy(selectRequest.GroupBy, docMap), OrderBy: toOrderBy(selectRequest.OrderBy, docMap), @@ -878,20 +989,20 @@ func toField(index int, selectRequest *request.Select) Field { // ToFilter converts the given `source` request filter to a Filter using the given mapping. // // Any requestables identified by name will be converted to being identified by index instead. 
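+// e.g. {"name": {"_eq": "Bob"}} becomes {PropertyIndex(1): {Operator("_eq"): "Bob"}}
+// for a mapping that stores "name" at index 1.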
-func ToFilter(source immutable.Option[request.Filter], mapping *core.DocumentMapping) *Filter { - if !source.HasValue() { +func ToFilter(source request.Filter, mapping *core.DocumentMapping) *Filter { + if len(source.Conditions) == 0 { return nil } - conditions := make(map[connor.FilterKey]any, len(source.Value().Conditions)) + conditions := make(map[connor.FilterKey]any, len(source.Conditions)) - for sourceKey, sourceClause := range source.Value().Conditions { + for sourceKey, sourceClause := range source.Conditions { key, clause := toFilterMap(sourceKey, sourceClause, mapping) conditions[key] = clause } return &Filter{ Conditions: conditions, - ExternalConditions: source.Value().Conditions, + ExternalConditions: source.Conditions, } } @@ -928,6 +1039,13 @@ func toFilterMap( returnClauses = append(returnClauses, returnClause) } return key, returnClauses + case map[string]any: + innerMapClause := map[connor.FilterKey]any{} + for innerSourceKey, innerSourceValue := range typedClause { + rKey, rValue := toFilterMap(innerSourceKey, innerSourceValue, mapping) + innerMapClause[rKey] = rValue + } + return key, innerMapClause default: return key, typedClause } diff --git a/planner/mapper/select.go b/planner/mapper/select.go index 2696c0ca82..1c4b509caa 100644 --- a/planner/mapper/select.go +++ b/planner/mapper/select.go @@ -25,7 +25,7 @@ type Select struct { // The document mapping for this select, describing how items yielded // for this select can be accessed and rendered. - core.DocumentMapping + *core.DocumentMapping // A commit identifier that can be specified to request data at a given time. Cid immutable.Option[string] diff --git a/planner/mapper/targetable.go b/planner/mapper/targetable.go index 1d2861f23f..49190b911f 100644 --- a/planner/mapper/targetable.go +++ b/planner/mapper/targetable.go @@ -86,6 +86,55 @@ func NewFilter() *Filter { } } +func (f *Filter) ToMap(mapping *core.DocumentMapping) map[string]any { + return filterObjectToMap(mapping, f.Conditions) +} + +func filterObjectToMap(mapping *core.DocumentMapping, obj map[connor.FilterKey]any) map[string]any { + outmap := make(map[string]any) + if obj == nil { + return nil + } + for k, v := range obj { + switch keyType := k.(type) { + case *PropertyIndex: + subObj := v.(map[connor.FilterKey]any) + outkey, _ := mapping.TryToFindNameFromIndex(keyType.Index) + childMapping, ok := tryGetChildMapping(mapping, keyType.Index) + if ok { + outmap[outkey] = filterObjectToMap(childMapping, subObj) + } else { + outmap[outkey] = filterObjectToMap(mapping, subObj) + } + + case *Operator: + switch keyType.Operation { + case "_and", "_or": + v := v.([]any) + logicMapEntries := make([]any, len(v)) + for i, item := range v { + itemMap := item.(map[connor.FilterKey]any) + logicMapEntries[i] = filterObjectToMap(mapping, itemMap) + } + outmap[keyType.Operation] = logicMapEntries + case "_not": + itemMap := v.(map[connor.FilterKey]any) + outmap[keyType.Operation] = filterObjectToMap(mapping, itemMap) + default: + outmap[keyType.Operation] = v + } + } + } + return outmap +} + +func tryGetChildMapping(mapping *core.DocumentMapping, index int) (*core.DocumentMapping, bool) { + if index <= len(mapping.ChildMappings)-1 { + return mapping.ChildMappings[index], true + } + return nil, false +} + // Limit represents a limit-offset pairing that controls how many // and which records will be returned from a request. 
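+// (e.g. limit=10 with offset=20 yields records 21 through 30)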
type Limit struct { diff --git a/planner/order.go b/planner/order.go index 7bbe0c91b0..5f61a952c9 100644 --- a/planner/order.go +++ b/planner/order.go @@ -82,7 +82,7 @@ func (p *Planner) OrderBy(parsed *mapper.Select, n *mapper.OrderBy) (*orderNode, p: p, ordering: n.Conditions, needSort: true, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, }, nil } @@ -110,14 +110,20 @@ func (n *orderNode) simpleExplain() (map[string]any, error) { for _, element := range n.ordering { // Build the list containing the corresponding names of all the indexes. fieldNames := []string{} + + mapping := n.documentMapping for _, fieldIndex := range element.FieldIndexes { - // Try to find the name of this index. - fieldName, found := n.documentMapping.TryToFindNameFromIndex(fieldIndex) + fieldName, found := mapping.TryToFindNameFromIndex(fieldIndex) if !found { return nil, client.NewErrFieldIndexNotExist(fieldIndex) } fieldNames = append(fieldNames, fieldName) + if fieldIndex < len(mapping.ChildMappings) { + if childMapping := mapping.ChildMappings[fieldIndex]; childMapping != nil { + mapping = childMapping + } + } } // Put it all together for this order element. diff --git a/planner/planner.go b/planner/planner.go index fb6d325123..3af7b745e7 100644 --- a/planner/planner.go +++ b/planner/planner.go @@ -17,14 +17,9 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/planner/mapper" ) -var ( - log = logging.MustNewLogger("planner") -) - // planNode is an interface all nodes in the plan tree need to implement. type planNode interface { // Initializes or Re-Initializes an existing planNode, often called internally by Start(). @@ -525,7 +520,11 @@ func (p *Planner) RunSubscriptionRequest( return nil, err } - return p.executeRequest(ctx, planNode) + data, err := p.executeRequest(ctx, planNode) + if err != nil { + return nil, err + } + return data, nil } // MakePlan makes a plan from the parsed request. diff --git a/planner/scan.go b/planner/scan.go index d8fb2c34c0..43bf47e27a 100644 --- a/planner/scan.go +++ b/planner/scan.go @@ -16,18 +16,18 @@ import ( "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" + "github.com/sourcenetwork/defradb/lens" "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/request/graphql/parser" ) +// scanExecInfo contains information about the execution of a scan. type scanExecInfo struct { // Total number of times scan was issued. iterations uint64 - // Total number of times attempted to fetch documents. - docFetches uint64 - - // Total number of documents that matched / passed the filter. - filterMatches uint64 + // Information about fetches. 
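+	// (docs fetched and fields fetched, accumulated across Next calls)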
+	fetches fetcher.ExecInfo
 }
 
 // scans an index for records
@@ -38,7 +38,7 @@ type scanNode struct {
 	p    *Planner
 	desc client.CollectionDescription
 
-	fields []*client.FieldDescription
+	fields []client.FieldDescription
 	docKey []byte
 
 	showDeleted bool
@@ -47,8 +47,7 @@
 	reverse bool
 
 	filter *mapper.Filter
-
-	scanInitialized bool
+	slct   *mapper.Select
 
 	fetcher fetcher.Fetcher
@@ -61,7 +60,16 @@ func (n *scanNode) Kind() string {
 
 func (n *scanNode) Init() error {
 	// init the fetcher
-	if err := n.fetcher.Init(&n.desc, n.fields, n.reverse, n.showDeleted); err != nil {
+	if err := n.fetcher.Init(
+		n.p.ctx,
+		n.p.txn,
+		&n.desc,
+		n.fields,
+		n.filter,
+		n.slct.DocumentMapping,
+		n.reverse,
+		n.showDeleted,
+	); err != nil {
 		return err
 	}
 	return n.initScan()
@@ -69,9 +77,63 @@
 func (n *scanNode) initCollection(desc client.CollectionDescription) error {
 	n.desc = desc
+	return n.initFields(n.slct.Fields)
+}
+
+func (n *scanNode) initFields(fields []mapper.Requestable) error {
+	for _, r := range fields {
+		// add all the possible base level fields the fetcher is responsible
+		// for, including those that are needed by higher level aggregates
+		// or groupings, which themselves might have further dependents
+		switch requestable := r.(type) {
+		// field is simple as it's just a base level field
+		case *mapper.Field:
+			n.tryAddField(requestable.GetName())
+		// select might have its own select fields and filter fields
+		case *mapper.Select:
+			n.tryAddField(requestable.Field.Name + "_id") // foreign key for type joins
+			err := n.initFields(requestable.Fields)
+			if err != nil {
+				return err
+			}
+		// aggregate might have its own target fields and filter fields
+		case *mapper.Aggregate:
+			for _, target := range requestable.AggregateTargets {
+				if target.Filter != nil {
+					fieldDescs, err := parser.ParseFilterFieldsForDescription(
+						target.Filter.ExternalConditions,
+						n.desc.Schema,
+					)
+					if err != nil {
+						return err
+					}
+					for _, fd := range fieldDescs {
+						n.tryAddField(fd.Name)
+					}
+				}
+				if target.ChildTarget.HasValue {
+					n.tryAddField(target.ChildTarget.Name)
+				} else {
+					n.tryAddField(target.Field.Name)
+				}
+			}
+		}
+	}
 	return nil
 }
 
+func (n *scanNode) tryAddField(fieldName string) bool {
+	fd, ok := n.desc.Schema.GetField(fieldName)
+	if !ok {
+		// skip fields that are not part of the
+		// schema description. The scanner (and fetcher)
+		// is only responsible for basic fields
+		return false
+	}
+	n.fields = append(n.fields, fd)
+	return true
+}
+
 // Start starts the internal logic of the scanner
 // like the DocumentFetcher, and more.
func (n *scanNode) Start() error { @@ -84,12 +146,11 @@ func (n *scanNode) initScan() error { n.spans = core.NewSpans(core.NewSpan(start, start.PrefixEnd())) } - err := n.fetcher.Start(n.p.ctx, n.p.txn, n.spans) + err := n.fetcher.Start(n.p.ctx, n.spans) if err != nil { return err } - n.scanInitialized = true return nil } @@ -103,32 +164,25 @@ func (n *scanNode) Next() (bool, error) { return false, nil } - // keep scanning until we find a doc that passes the filter - for { - var err error - n.docKey, n.currentValue, err = n.fetcher.FetchNextDoc(n.p.ctx, n.documentMapping) - if err != nil { - return false, err - } - n.execInfo.docFetches++ + var err error + var execInfo fetcher.ExecInfo + n.docKey, n.currentValue, execInfo, err = n.fetcher.FetchNextDoc(n.p.ctx, n.documentMapping) + if err != nil { + return false, err + } + n.execInfo.fetches.Add(execInfo) - if len(n.currentValue.Fields) == 0 { - return false, nil - } - n.documentMapping.SetFirstOfName( - &n.currentValue, - request.DeletedFieldName, - n.currentValue.Status.IsDeleted(), - ) - passed, err := mapper.RunFilter(n.currentValue, n.filter) - if err != nil { - return false, err - } - if passed { - n.execInfo.filterMatches++ - return true, nil - } + if len(n.currentValue.Fields) == 0 { + return false, nil } + + n.documentMapping.SetFirstOfName( + &n.currentValue, + request.DeletedFieldName, + n.currentValue.Status.IsDeleted(), + ) + + return true, nil } func (n *scanNode) Spans(spans core.Spans) { @@ -160,10 +214,10 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) } // Add the collection attributes. 
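+	// (e.g. the scanned collection's name and ID from n.desc)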
@@ -176,11 +230,11 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { return simpleExplainMap, nil } -func (n *scanNode) excuteExplain() map[string]any { +func (n *scanNode) executeExplain() map[string]any { return map[string]any{ - "iterations": n.execInfo.iterations, - "docFetches": n.execInfo.docFetches, - "filterMatches": n.execInfo.filterMatches, + "iterations": n.execInfo.iterations, + "docFetches": n.execInfo.fetches.DocsFetched, + "fieldFetches": n.execInfo.fetches.FieldsFetched, } } @@ -192,7 +246,7 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err return n.simpleExplain() case request.ExecuteExplain: - return n.excuteExplain(), nil + return n.executeExplain(), nil default: return nil, ErrUnknownExplainRequestType @@ -202,18 +256,30 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err // Merge implements mergeNode func (n *scanNode) Merge() bool { return true } -func (p *Planner) Scan(parsed *mapper.Select) *scanNode { +func (p *Planner) Scan(parsed *mapper.Select) (*scanNode, error) { var f fetcher.Fetcher if parsed.Cid.HasValue() { f = new(fetcher.VersionedFetcher) } else { f = new(fetcher.DocumentFetcher) + f = lens.NewFetcher(f, p.db.LensRegistry()) } - return &scanNode{ + scan := &scanNode{ p: p, fetcher: f, - docMapper: docMapper{&parsed.DocumentMapping}, + slct: parsed, + docMapper: docMapper{parsed.DocumentMapping}, + } + + colDesc, err := p.getCollectionDesc(parsed.CollectionName) + if err != nil { + return nil, err + } + err = scan.initCollection(colDesc) + if err != nil { + return nil, err } + return scan, nil } // multiScanNode is a buffered scanNode that has diff --git a/planner/select.go b/planner/select.go index a1d86bcddc..4fb9b143f2 100644 --- a/planner/select.go +++ b/planner/select.go @@ -115,7 +115,7 @@ type selectNode struct { // are defined in the subtype scan node. filter *mapper.Filter - docKeys immutable.Option[[]string] + keys immutable.Option[[]string] selectReq *mapper.Select groupSelects []*mapper.Select @@ -167,9 +167,9 @@ func (n *selectNode) Next() (bool, error) { n.execInfo.filterMatches++ - if n.docKeys.HasValue() { + if n.keys.HasValue() { docKey := n.currentValue.GetKey() - for _, key := range n.docKeys.Value() { + for _, key := range n.keys.Value() { if docKey == key { return true, nil } @@ -194,10 +194,17 @@ func (n *selectNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) + } + + // Add the keys attribute if it exists. 
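+	// (`keys` holds the explicit docKeys this select was scoped to, if any)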
+	if !n.keys.HasValue() {
+		simpleExplainMap[keysLabel] = nil
+	} else {
+		simpleExplainMap[keysLabel] = n.keys.Value()
 	}
 
 	return simpleExplainMap, nil
@@ -359,7 +366,6 @@ func (n *selectNode) addTypeIndexJoin(subSelect *mapper.Select) error {
 	if err != nil {
 		return err
 	}
-
 	if err := n.addSubPlan(subSelect.Index, typeIndexJoin); err != nil {
 		return err
 	}
@@ -405,9 +411,9 @@ func (p *Planner) SelectFromSource(
 		source:     source,
 		origSource: source,
 		selectReq:  selectReq,
-		docMapper:  docMapper{&selectReq.DocumentMapping},
+		docMapper:  docMapper{selectReq.DocumentMapping},
 		filter:     selectReq.Filter,
-		docKeys:    selectReq.DocKeys,
+		keys:       selectReq.DocKeys,
 	}
 	limit := selectReq.Limit
 	orderBy := selectReq.OrderBy
@@ -452,7 +458,7 @@ func (p *Planner) SelectFromSource(
 		order:      orderPlan,
 		group:      groupPlan,
 		aggregates: aggregates,
-		docMapper:  docMapper{&selectReq.DocumentMapping},
+		docMapper:  docMapper{selectReq.DocumentMapping},
 	}
 	return top, nil
 }
@@ -462,9 +468,9 @@ func (p *Planner) Select(selectReq *mapper.Select) (planNode, error) {
 	s := &selectNode{
 		planner:   p,
 		filter:    selectReq.Filter,
-		docKeys:   selectReq.DocKeys,
+		keys:      selectReq.DocKeys,
 		selectReq: selectReq,
-		docMapper: docMapper{&selectReq.DocumentMapping},
+		docMapper: docMapper{selectReq.DocumentMapping},
 	}
 	limit := selectReq.Limit
 	orderBy := selectReq.OrderBy
@@ -496,7 +502,7 @@ func (p *Planner) Select(selectReq *mapper.Select) (planNode, error) {
 		order:      orderPlan,
 		group:      groupPlan,
 		aggregates: aggregates,
-		docMapper:  docMapper{&selectReq.DocumentMapping},
+		docMapper:  docMapper{selectReq.DocumentMapping},
 	}
 	return top, nil
 }
diff --git a/planner/sum.go b/planner/sum.go
index 7bb14f2501..0e1690898e 100644
--- a/planner/sum.go
+++ b/planner/sum.go
@@ -61,7 +61,7 @@ func (p *Planner) Sum(
 		isFloat:           isFloat,
 		aggregateMapping:  field.AggregateTargets,
 		virtualFieldIndex: field.Index,
-		docMapper:         docMapper{&field.DocumentMapping},
+		docMapper:         docMapper{field.DocumentMapping},
 	}, nil
 }
@@ -82,7 +82,7 @@ func (p *Planner) isValueFloat(
 		return false, err
 	}
 
-	fieldDescription, fieldDescriptionFound := parentDescription.GetField(source.Name)
+	fieldDescription, fieldDescriptionFound := parentDescription.Schema.GetField(source.Name)
 	if !fieldDescriptionFound {
 		return false, client.NewErrFieldNotExist(source.Name)
 	}
@@ -130,7 +130,7 @@ func (p *Planner) isValueFloat(
 		return false, err
 	}
 
-	fieldDescription, fieldDescriptionFound := childCollectionDescription.GetField(source.ChildTarget.Name)
+	fieldDescription, fieldDescriptionFound := childCollectionDescription.Schema.GetField(source.ChildTarget.Name)
 	if !fieldDescriptionFound {
 		return false, client.NewErrFieldNotExist(source.ChildTarget.Name)
 	}
@@ -163,10 +163,19 @@ func (n *sumNode) simpleExplain() (map[string]any, error) {
 	simpleExplainMap := map[string]any{}
 
 	// Add the filter attribute if it exists.
-	if source.Filter == nil || source.Filter.ExternalConditions == nil {
+	if source.Filter == nil {
 		simpleExplainMap[filterLabel] = nil
 	} else {
-		simpleExplainMap[filterLabel] = source.Filter.ExternalConditions
+		// Get the target aggregate's document mapping, since the filters
+		// are relative to the target aggregate collection (and doc mapper).
+		var targetMap *core.DocumentMapping
+		if source.Index < len(n.documentMapping.ChildMappings) &&
+			n.documentMapping.ChildMappings[source.Index] != nil {
+			targetMap = n.documentMapping.ChildMappings[source.Index]
+		} else {
+			targetMap = n.documentMapping
+		}
+		simpleExplainMap[filterLabel] = source.Filter.ToMap(targetMap)
 	}
 
 	// Add the main field name.
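Several of the `simpleExplain` changes in this diff replace the raw `ExternalConditions` with `Filter.ToMap`, which walks the index-keyed conditions and, via the document mapping, translates them back into user-facing field names. A minimal standalone sketch of that translation, using simplified stand-in types rather than the actual `connor`/`mapper` definitions:

```go
package main

import "fmt"

// Simplified stand-ins for the mapper's connor.FilterKey implementations.
type PropertyIndex struct{ Index int }
type Operator struct{ Operation string }

// toMap mirrors filterObjectToMap: PropertyIndex keys are translated back to
// external field names, while Operator keys keep their operation string.
func toMap(names map[int]string, conditions map[any]any) map[string]any {
	out := map[string]any{}
	for k, v := range conditions {
		switch key := k.(type) {
		case PropertyIndex:
			out[names[key.Index]] = toMap(names, v.(map[any]any))
		case Operator:
			out[key.Operation] = v
		}
	}
	return out
}

func main() {
	// Index-keyed conditions, as ToFilter would produce them for a
	// document mapping that stores "name" at index 1.
	conditions := map[any]any{
		PropertyIndex{Index: 1}: map[any]any{
			Operator{Operation: "_eq"}: "Bob",
		},
	}
	fmt.Println(toMap(map[int]string{1: "name"}, conditions)) // map[name:map[_eq:Bob]]
}
```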
diff --git a/planner/top.go b/planner/top.go index 1f7764e091..93e530b2fc 100644 --- a/planner/top.go +++ b/planner/top.go @@ -186,7 +186,7 @@ func (n *topLevelNode) Next() (bool, error) { // Top creates a new topLevelNode using the given Select. func (p *Planner) Top(m *mapper.Select) (*topLevelNode, error) { node := topLevelNode{ - docMapper: docMapper{&m.DocumentMapping}, + docMapper: docMapper{m.DocumentMapping}, } aggregateChildren := []planNode{} @@ -209,6 +209,7 @@ func (p *Planner) Top(m *mapper.Select) (*topLevelNode, error) { } aggregateChildren = append(aggregateChildren, child) aggregateChildIndexes = append(aggregateChildIndexes, field.GetIndex()) + case *mapper.Select: child, err := p.Select(f) if err != nil { diff --git a/planner/type_join.go b/planner/type_join.go index b8dda2ebd2..1bab12b60f 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -79,7 +79,7 @@ func (p *Planner) makeTypeIndexJoin( var err error desc := parent.sourceInfo.collectionDescription - typeFieldDesc, ok := desc.GetField(subType.Name) + typeFieldDesc, ok := desc.Schema.GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -138,7 +138,6 @@ func (n *typeIndexJoin) simpleExplain() (map[string]any, error) { joinDirectionLabel = "direction" joinDirectionPrimaryLabel = "primary" joinDirectionSecondaryLabel = "secondary" - joinSubTypeLabel = "subType" joinSubTypeNameLabel = "subTypeName" joinRootLabel = "rootName" ) @@ -228,12 +227,17 @@ func splitFilterByType(filter *mapper.Filter, subType int) (*mapper.Filter, *map keyFound, sub := removeConditionIndex(conditionKey, filter.Conditions) if !keyFound { - return filter, &mapper.Filter{} + return filter, nil } // create new splitup filter // our schema ensures that if sub exists, its of type map[string]any splitF := &mapper.Filter{Conditions: map[connor.FilterKey]any{conditionKey: sub}} + + // check if we have any remaining filters + if len(filter.Conditions) == 0 { + return nil, splitF + } return filter, splitF } @@ -264,7 +268,15 @@ func (p *Planner) makeTypeJoinOne( ) (*typeJoinOne, error) { // split filter if scan, ok := source.(*scanNode); ok { - scan.filter, parent.filter = splitFilterByType(scan.filter, subType.Index) + var parentfilter *mapper.Filter + scan.filter, parentfilter = splitFilterByType(scan.filter, subType.Index) + if parentfilter != nil { + if parent.filter == nil { + parent.filter = new(mapper.Filter) + } + parent.filter.Conditions = mergeFilterConditions( + parent.filter.Conditions, parentfilter.Conditions) + } subType.ShowDeleted = parent.selectReq.ShowDeleted } @@ -274,7 +286,7 @@ func (p *Planner) makeTypeJoinOne( } // get the correct sub field schema type (collection) - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.GetField(subType.Name) + subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -335,50 +347,56 @@ func (n *typeJoinOne) Next() (bool, error) { doc := n.root.Value() if n.primary { - n.currentValue = n.valuesPrimary(doc) + n.currentValue, err = n.valuesPrimary(doc) } else { - n.currentValue = n.valuesSecondary(doc) + n.currentValue, err = n.valuesSecondary(doc) + } + + if err != nil { + return false, err } + return true, nil } -func (n *typeJoinOne) valuesSecondary(doc core.Doc) core.Doc { +func (n *typeJoinOne) valuesSecondary(doc core.Doc) (core.Doc, error) { fkIndex := &mapper.PropertyIndex{ - Index: 
n.subType.DocumentMap().FirstIndexOfName(n.subTypeFieldName + "_id"), + Index: n.subType.DocumentMap().FirstIndexOfName(n.subTypeFieldName + request.RelatedObjectID), } filter := map[connor.FilterKey]any{ - fkIndex: doc.GetKey(), - } - - // We have to reset the scan node after appending the new key-filter - if err := n.subType.Init(); err != nil { - log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err) - return doc + fkIndex: map[connor.FilterKey]any{ + mapper.FilterEqOp: doc.GetKey(), + }, } // using the doc._key as a filter err := appendFilterToScanNode(n.subType, filter) if err != nil { - return core.Doc{} + return core.Doc{}, err + } + + // We have to reset the scan node after appending the new key-filter + if err := n.subType.Init(); err != nil { + return doc, NewErrSubTypeInit(err) } next, err := n.subType.Next() if !next || err != nil { - return doc + return doc, err } subdoc := n.subType.Value() doc.Fields[n.subSelect.Index] = subdoc - return doc + return doc, nil } -func (n *typeJoinOne) valuesPrimary(doc core.Doc) core.Doc { +func (n *typeJoinOne) valuesPrimary(doc core.Doc) (core.Doc, error) { // get the subtype doc key - subDocKey := n.docMapper.documentMapping.FirstOfName(doc, n.subTypeName+"_id") + subDocKey := n.docMapper.documentMapping.FirstOfName(doc, n.subTypeName+request.RelatedObjectID) subDocKeyStr, ok := subDocKey.(string) if !ok { - return doc + return doc, nil } // create the collection key for the sub doc @@ -394,8 +412,7 @@ func (n *typeJoinOne) valuesPrimary(doc core.Doc) core.Doc { // re-initialize the sub type plan if err := n.subType.Init(); err != nil { - log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err) - return doc + return doc, NewErrSubTypeInit(err) } // if we don't find any docs from our point span lookup @@ -404,18 +421,17 @@ func (n *typeJoinOne) valuesPrimary(doc core.Doc) core.Doc { next, err := n.subType.Next() if err != nil { - log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err) - return doc + return doc, err } if !next { - return doc + return doc, nil } subDoc := n.subType.Value() doc.Fields[n.subSelect.Index] = subDoc - return doc + return doc, nil } func (n *typeJoinOne) Close() error { @@ -453,7 +469,15 @@ func (p *Planner) makeTypeJoinMany( ) (*typeJoinMany, error) { // split filter if scan, ok := source.(*scanNode); ok { - scan.filter, parent.filter = splitFilterByType(scan.filter, subType.Index) + var parentfilter *mapper.Filter + scan.filter, parentfilter = splitFilterByType(scan.filter, subType.Index) + if parentfilter != nil { + if parent.filter == nil { + parent.filter = new(mapper.Filter) + } + parent.filter.Conditions = mergeFilterConditions( + parent.filter.Conditions, parentfilter.Conditions) + } subType.ShowDeleted = parent.selectReq.ShowDeleted } @@ -462,7 +486,7 @@ func (p *Planner) makeTypeJoinMany( return nil, err } - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.GetField(subType.Name) + subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -526,11 +550,14 @@ func (n *typeJoinMany) Next() (bool, error) { // @todo: handle index for one-to-many setup } else { fkIndex := &mapper.PropertyIndex{ - Index: n.subSelect.FirstIndexOfName(n.rootName + "_id"), + Index: n.subSelect.FirstIndexOfName(n.rootName + request.RelatedObjectID), } filter := map[connor.FilterKey]any{ - fkIndex: n.currentValue.GetKey(), // user_id: "bae-ALICE" | user_id: 
"bae-CHARLIE" + fkIndex: map[connor.FilterKey]any{ + mapper.FilterEqOp: n.currentValue.GetKey(), + }, } + // using the doc._key as a filter err := appendFilterToScanNode(n.subType, filter) if err != nil { @@ -574,19 +601,11 @@ func appendFilterToScanNode(plan planNode, filterCondition map[connor.FilterKey] switch node := plan.(type) { case *scanNode: filter := node.filter - if filter == nil { + if filter == nil && len(filterCondition) > 0 { filter = mapper.NewFilter() } - // merge filter conditions - for k, v := range filterCondition { - indexKey, isIndexKey := k.(*mapper.PropertyIndex) - if !isIndexKey { - continue - } - removeConditionIndex(indexKey, filter.Conditions) - filter.Conditions[k] = v - } + filter.Conditions = mergeFilterConditions(filter.Conditions, filterCondition) node.filter = filter case nil: @@ -597,6 +616,23 @@ func appendFilterToScanNode(plan planNode, filterCondition map[connor.FilterKey] return nil } +// merge into dest with src, return dest +func mergeFilterConditions(dest map[connor.FilterKey]any, src map[connor.FilterKey]any) map[connor.FilterKey]any { + if dest == nil { + dest = make(map[connor.FilterKey]any) + } + // merge filter conditions + for k, v := range src { + indexKey, isIndexKey := k.(*mapper.PropertyIndex) + if !isIndexKey { + continue + } + removeConditionIndex(indexKey, dest) + dest[k] = v + } + return dest +} + func removeConditionIndex( key *mapper.PropertyIndex, filterConditions map[connor.FilterKey]any, diff --git a/planner/update.go b/planner/update.go index c13663ad77..36b5487c5e 100644 --- a/planner/update.go +++ b/planner/update.go @@ -118,10 +118,10 @@ func (n *updateNode) simpleExplain() (map[string]any, error) { simpleExplainMap[idsLabel] = n.ids // Add the filter attribute if it exists, otherwise have it nil. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) } // Add the attribute that represents the patch to update with. @@ -160,7 +160,7 @@ func (p *Planner) UpdateDocs(parsed *mapper.Mutation) (planNode, error) { ids: parsed.DocKeys.Value(), isUpdating: true, patch: parsed.Data, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, } // get collection diff --git a/playground/.gitignore b/playground/.gitignore new file mode 100644 index 0000000000..a547bf36d8 --- /dev/null +++ b/playground/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/playground/README.md b/playground/README.md new file mode 100644 index 0000000000..3018611396 --- /dev/null +++ b/playground/README.md @@ -0,0 +1,27 @@ +# DefraDB Playground + +A web based playground for DefraDB. + +## Developing + +Run a development server bound to `localhost:5173`. + +```bash +npm install +npm run dev +``` + +Start DefraDB with CORS allowed. + +```bash +defradb start --allowed-origins="*" +``` + +## Building + +Create a static build and output files to `./dist`. 
+
+```bash
+npm install
+npm run build
+```
diff --git a/playground/index.html b/playground/index.html
new file mode 100644
index 0000000000..ab81ce5bf4
--- /dev/null
+++ b/playground/index.html
@@ -0,0 +1,13 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>DefraDB Playground</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
+ + + diff --git a/playground/package-lock.json b/playground/package-lock.json new file mode 100644 index 0000000000..9eb027211f --- /dev/null +++ b/playground/package-lock.json @@ -0,0 +1,3464 @@ +{ + "name": "playground", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "playground", + "version": "0.0.0", + "dependencies": { + "@tanstack/react-query": "^4.32.0", + "fast-json-patch": "^3.1.1", + "graphiql": "^3.0.4", + "graphql": "^16.7.1", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-hook-form": "^7.45.2" + }, + "devDependencies": { + "@types/react": "^18.2.15", + "@types/react-dom": "^18.2.7", + "@typescript-eslint/eslint-plugin": "^5.59.0", + "@typescript-eslint/parser": "^5.62.0", + "@vitejs/plugin-react-swc": "^3.0.0", + "eslint": "^8.45.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.3.4", + "typescript": "^5.0.2", + "vite": "^4.3.9" + } + }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz", + "integrity": "sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==", + "dependencies": { + "regenerator-runtime": "^0.13.11" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@codemirror/language": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.0.0.tgz", + "integrity": "sha512-rtjk5ifyMzOna1c7PBu7J1VCt0PvA5wy3o8eMVnxMKb7z8KA7JFecvD04dSn14vj/bBaAbqRsGed5OjtofEnLA==", + "peer": true, + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/common": "^1.0.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/state": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", + "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==", + "peer": true + }, + "node_modules/@codemirror/view": { + "version": "6.15.3", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.15.3.tgz", + "integrity": "sha512-chNgR8H7Ipx7AZUt0+Kknk7BCow/ron3mHd1VZdM7hQXiI79+UlWqcxpCiexTxZQ+iSkqndk3HHAclJOcjSuog==", + "peer": true, + "dependencies": { + "@codemirror/state": "^6.1.4", + "style-mod": "^4.0.0", + "w3c-keyname": "^2.2.4" + } + }, + "node_modules/@emotion/is-prop-valid": { + "version": "0.8.8", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-0.8.8.tgz", + "integrity": "sha512-u5WtneEAr5IDG2Wv65yhunPSMLIpuKsbuOktRojfrEiEvRyC85LgPMZI63cr7NUqT8ZIGdSVg8ZKGxIug4lXcA==", + "optional": true, + "dependencies": { + "@emotion/memoize": "0.7.4" + } + }, + "node_modules/@emotion/memoize": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.7.4.tgz", + "integrity": "sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw==", + "optional": true + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.17.19", + "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz", + "integrity": "sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.1.tgz", + "integrity": "sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.0.tgz", + "integrity": "sha512-Lj7DECXqIVCqnqjjHMPna4vn6GJcMgul/wuS0je9OZ9gsL0zzDpKPVtcG1HaDVc+9y+qgXneTeUMbCqXJNpH1A==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "8.44.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.44.0.tgz", + "integrity": "sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.3.1.tgz", + "integrity": "sha512-Bu+AMaXNjrpjh41znzHqaz3r2Nr8hHuHZT6V2LBKMhyMl0FgKA62PNYbqnfgmzOhoWZj70Zecisbo4H1rotP5g==" + }, + "node_modules/@floating-ui/dom": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.4.5.tgz", + "integrity": "sha512-96KnRWkRnuBSSFbj0sFGwwOUd8EkiecINVl0O9wiZlZ64EkpyAOG3Xc2vKKNJmru0Z7RqWNymA+6b8OZqjgyyw==", + "dependencies": { + "@floating-ui/core": "^1.3.1" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.1.tgz", + "integrity": "sha512-rZtAmSht4Lry6gdhAJDrCp/6rKN7++JnL1/Anbr/DdeyYXQPxvg/ivrbYvJulbRf4vL8b212suwMM2lxbv+RQA==", + "dependencies": { + "@floating-ui/dom": "^1.3.0" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@graphiql/react": { + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.19.2.tgz", + "integrity": "sha512-xdcLLUHr15AUxtv9Jyw7Mlf6Vd9EJb8ULImHTJOzDgW7DNAGUdU6Yu7xTGP/eCx+RrOQON1Bschv8Mjxk56tYg==", + "dependencies": { + "@graphiql/toolkit": "^0.9.1", + "@headlessui/react": "^1.7.15", + "@radix-ui/react-dialog": "^1.0.4", + "@radix-ui/react-dropdown-menu": "^2.0.5", + 
"@radix-ui/react-tooltip": "^1.0.6", + "@radix-ui/react-visually-hidden": "^1.0.3", + "@types/codemirror": "^5.60.8", + "clsx": "^1.2.1", + "codemirror": "^5.65.3", + "codemirror-graphql": "^2.0.9", + "copy-to-clipboard": "^3.2.0", + "framer-motion": "^6.5.1", + "graphql-language-service": "^5.1.7", + "markdown-it": "^12.2.0", + "set-value": "^4.1.0" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0", + "react": "^16.8.0 || ^17 || ^18", + "react-dom": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/@graphiql/toolkit": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@graphiql/toolkit/-/toolkit-0.9.1.tgz", + "integrity": "sha512-LVt9pdk0830so50ZnU2Znb2rclcoWznG8r8asqAENzV0U1FM1kuY0sdPpc/rBc9MmmNgnB6A+WZzDhq6dbhTHA==", + "dependencies": { + "@n1ru4l/push-pull-async-iterable-iterator": "^3.1.0", + "meros": "^1.1.4" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0", + "graphql-ws": ">= 4.5.0" + }, + "peerDependenciesMeta": { + "graphql-ws": { + "optional": true + } + } + }, + "node_modules/@headlessui/react": { + "version": "1.7.15", + "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.15.tgz", + "integrity": "sha512-OTO0XtoRQ6JPB1cKNFYBZv2Q0JMqMGNhYP1CjPvcJvjz8YGokz8oAj89HIYZGN0gZzn/4kk9iUpmMF4Q21Gsqw==", + "dependencies": { + "client-only": "^0.0.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16 || ^17 || ^18", + "react-dom": "^16 || ^17 || ^18" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz", + "integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.1", + "debug": "^4.1.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "dev": true + }, + "node_modules/@lezer/common": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", + "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==", + "peer": true + }, + "node_modules/@lezer/highlight": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", + "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "peer": true, + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/lr": { + "version": "1.3.9", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.9.tgz", + "integrity": "sha512-XPz6dzuTHlnsbA5M2DZgjflNQ+9Hi5Swhic0RULdp3oOs3rh6bqGZolosVqN/fQIT8uNiepzINJDnS39oweTHQ==", + "peer": true, + "dependencies": { + 
"@lezer/common": "^1.0.0" + } + }, + "node_modules/@motionone/animation": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.15.1.tgz", + "integrity": "sha512-mZcJxLjHor+bhcPuIFErMDNyrdb2vJur8lSfMCsuCB4UyV8ILZLvK+t+pg56erv8ud9xQGK/1OGPt10agPrCyQ==", + "dependencies": { + "@motionone/easing": "^10.15.1", + "@motionone/types": "^10.15.1", + "@motionone/utils": "^10.15.1", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/animation/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/dom": { + "version": "10.12.0", + "resolved": "https://registry.npmjs.org/@motionone/dom/-/dom-10.12.0.tgz", + "integrity": "sha512-UdPTtLMAktHiqV0atOczNYyDd/d8Cf5fFsd1tua03PqTwwCe/6lwhLSQ8a7TbnQ5SN0gm44N1slBfj+ORIhrqw==", + "dependencies": { + "@motionone/animation": "^10.12.0", + "@motionone/generators": "^10.12.0", + "@motionone/types": "^10.12.0", + "@motionone/utils": "^10.12.0", + "hey-listen": "^1.0.8", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/dom/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/easing": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.15.1.tgz", + "integrity": "sha512-6hIHBSV+ZVehf9dcKZLT7p5PEKHGhDwky2k8RKkmOvUoYP3S+dXsKupyZpqx5apjd9f+php4vXk4LuS+ADsrWw==", + "dependencies": { + "@motionone/utils": "^10.15.1", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/easing/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/generators": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.15.1.tgz", + "integrity": "sha512-67HLsvHJbw6cIbLA/o+gsm7h+6D4Sn7AUrB/GPxvujse1cGZ38F5H7DzoH7PhX+sjvtDnt2IhFYF2Zp1QTMKWQ==", + "dependencies": { + "@motionone/types": "^10.15.1", + "@motionone/utils": "^10.15.1", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/generators/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/types": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.15.1.tgz", + "integrity": "sha512-iIUd/EgUsRZGrvW0jqdst8st7zKTzS9EsKkP+6c6n4MPZoQHwiHuVtTQLD6Kp0bsBLhNzKIBlHXponn/SDT4hA==" + }, + "node_modules/@motionone/utils": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.15.1.tgz", + "integrity": "sha512-p0YncgU+iklvYr/Dq4NobTRdAPv9PveRDUXabPEeOjBLSO/1FNB2phNTZxOxpi1/GZwYpAoECEa0Wam+nsmhSw==", + "dependencies": { + "@motionone/types": "^10.15.1", + "hey-listen": "^1.0.8", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/utils/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": 
"sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@n1ru4l/push-pull-async-iterable-iterator": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@n1ru4l/push-pull-async-iterable-iterator/-/push-pull-async-iterable-iterator-3.2.0.tgz", + "integrity": "sha512-3fkKj25kEjsfObL6IlKPAlHYPq/oYwUkkQ03zsTTiDjD7vg/RxjdiLeCydqtxHZP0JgsXL3D/X5oAkMGzuUp/Q==", + "engines": { + "node": ">=12" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@radix-ui/primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", + "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.0.3.tgz", + "integrity": "sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.0.3.tgz", + "integrity": "sha512-3SzW+0PW7yBBoQlT8wNcGtaxaD0XSu0uLUFgrtHY08Acx05TaHaOmVLR73c0j/cqpDy53KBMO7s0dx2wmOIDIA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", + "integrity": 
"sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", + "integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.4.tgz", + "integrity": "sha512-hJtRy/jPULGQZceSAP2Re6/4NpKo8im6V8P2hUqZsdFiSL8l35kYsw3qbRI6Ay5mQd2+wlLqje770eq+RJ3yZg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.0.1.tgz", + "integrity": "sha512-RXcvnXgyvYvBEOhCBuddKecVkoMiI10Jcm5cTI7abJRAHYfFxeu+FBQs/DvdxSYucxR5mna0dNsL6QFlds5TMA==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.4.tgz", + "integrity": "sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-escape-keydown": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dropdown-menu": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.0.5.tgz", + "integrity": 
"sha512-xdOrZzOTocqqkCkYo8yRPCib5OkTkqN7lqNCdxwPOdE466DOaNl4N8PkUIlsXthQvW5Wwkd+aEmWpfWlBoDPEw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-menu": "2.0.5", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.1.tgz", + "integrity": "sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.3.tgz", + "integrity": "sha512-upXdPfqI4islj2CslyfUBNlaJCPybbqRHAi1KER7Isel9Q2AtSJ0zRBZv8mWQiFXD2nyAJ4BhC3yXgZ6kMBSrQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", + "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.0.5.tgz", + "integrity": "sha512-Gw4f9pwdH+w5w+49k0gLjN0PfRDHvxmAgG16AbyJZ7zhwZ6PBHKtWohvnSwfusfnK3L68dpBREHpVkj8wEM7ZA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-roving-focus": "1.0.4", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-callback-ref": "1.0.1", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + 
"peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.2.tgz", + "integrity": "sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-rect": "1.0.1", + "@radix-ui/react-use-size": "1.0.1", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.3.tgz", + "integrity": "sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", + "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", + "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.0.4.tgz", + "integrity": 
"sha512-2mUg5Mgcu001VkGy+FfzZyzbmuUWzgWkj3rvv4yu+mLw03+mTzbxZHvfcGyFp2b8EkQeMkpRQ5FiA2Vr2O6TeQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", + "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.6.tgz", + "integrity": "sha512-DmNFOiwEc2UDigsYj6clJENma58OelxD24O4IODoZ+3sQc3Zb+L8w1EP+y9laTuKCLAysPw4fD6/v0j4KNV8rg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", + "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", + "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } 
+ }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", + "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", + "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.0.1.tgz", + "integrity": "sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.0.1.tgz", + "integrity": "sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.0.3.tgz", + "integrity": "sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.0.1.tgz", + "integrity": "sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@swc/core": { + "version": "1.3.62", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.62.tgz", + "integrity": "sha512-J58hWY+/G8vOr4J6ZH9hLg0lMSijZtqIIf4HofZezGog/pVX6sJyBJ40dZ1ploFkDIlWTWvJyqtpesBKS73gkQ==", + "dev": true, + "hasInstallScript": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.3.62", + "@swc/core-darwin-x64": "1.3.62", + "@swc/core-linux-arm-gnueabihf": "1.3.62", + "@swc/core-linux-arm64-gnu": "1.3.62", + "@swc/core-linux-arm64-musl": "1.3.62", + "@swc/core-linux-x64-gnu": "1.3.62", + "@swc/core-linux-x64-musl": "1.3.62", + "@swc/core-win32-arm64-msvc": "1.3.62", + "@swc/core-win32-ia32-msvc": "1.3.62", + "@swc/core-win32-x64-msvc": "1.3.62" + }, + "peerDependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.3.62", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.62.tgz", + "integrity": "sha512-MmGilibITz68LEje6vJlKzc2gUUSgzvB3wGLSjEORikTNeM7P8jXVxE4A8fgZqDeudJUm9HVWrxCV+pHDSwXhA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@tanstack/query-core": { + "version": "4.32.0", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.32.0.tgz", + "integrity": "sha512-ei4IYwL2kmlKSlCw9WgvV7PpXi0MiswVwfQRxawhJA690zWO3dU49igaQ/UMTl+Jy9jj9dK5IKAYvbX7kUvviQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + "version": "4.32.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.32.0.tgz", + "integrity": "sha512-B8WUMcByYAH9500ENejDCATOmEZhqjtS9wsfiQ3BNa+s+yAynY8SESI8WWHhSqUmjd0pmCSFRP6BOUGSda3QXA==", + "dependencies": { + "@tanstack/query-core": "4.32.0", + "use-sync-external-store": "^1.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-native": "*" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, + "node_modules/@types/codemirror": { + "version": "5.60.8", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.8.tgz", + "integrity": "sha512-VjFgDF/eB+Aklcy15TtOTLQeMjTo07k7KAjql8OK5Dirr7a6sJY4T1uVBDuTVG9VEmn1uUsohOpYnVfgC6/jyw==", + "dependencies": { + "@types/tern": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", + "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + }, + "node_modules/@types/json-schema": { + "version": "7.0.12", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", + "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", + "dev": true + }, + "node_modules/@types/prop-types": { + "version": "15.7.5", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", + "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", + "devOptional": true + }, + "node_modules/@types/react": { + "version": "18.2.15", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.15.tgz", + "integrity": "sha512-oEjE7TQt1fFTFSbf8kkNuc798ahTUzn3Le67/PWjE8MAfYAD/qB7O8hSTcromLFqHCt9bcdOg5GXMokzTjJ5SA==", + "devOptional": true, + "dependencies": { + "@types/prop-types": 
"*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.7.tgz", + "integrity": "sha512-GRaAEriuT4zp9N4p1i8BDBYmEyfo+xQ3yHjJU4eiK5NDa1RmUZG+unZABUTK4/Ox/M+GaHwb6Ow8rUITrtjszA==", + "devOptional": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", + "devOptional": true + }, + "node_modules/@types/semver": { + "version": "7.5.0", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.0.tgz", + "integrity": "sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==", + "dev": true + }, + "node_modules/@types/tern": { + "version": "0.23.4", + "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.4.tgz", + "integrity": "sha512-JAUw1iXGO1qaWwEOzxTKJZ/5JxVeON9kvGZ/osgZaJImBnyjyn0cjovPsf6FNLmyGY8Vw9DoXZCMlfMkMwHRWg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.59.11.tgz", + "integrity": "sha512-XxuOfTkCUiOSyBWIvHlUraLw/JT/6Io1365RO6ZuI88STKMavJZPNMU0lFcUTeQXEhHiv64CbxYxBNoDVSmghg==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.59.11", + "@typescript-eslint/type-utils": "5.59.11", + "@typescript-eslint/utils": "5.59.11", + "debug": "^4.3.4", + "grapheme-splitter": "^1.0.4", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" + }, + "engines": { + "node": "^12.22.0 
|| ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.11.tgz", + "integrity": "sha512-dHFOsxoLFtrIcSj5h0QoBT/89hxQONwmn3FOQ0GOQcLOOXm+MIrS8zEAhs4tWl5MraxCY3ZJpaXQQdFMc2Tu+Q==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.11", + "@typescript-eslint/visitor-keys": "5.59.11" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.59.11.tgz", + "integrity": "sha512-LZqVY8hMiVRF2a7/swmkStMYSoXMFlzL6sXV6U/2gL5cwnLWQgLEG8tjWPpaE4rMIdZ6VKWwcffPlo1jPfk43g==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "5.59.11", + "@typescript-eslint/utils": "5.59.11", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "5.59.11", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.11.tgz", + "integrity": "sha512-epoN6R6tkvBYSc+cllrz+c2sOFWkbisJZWkOE+y3xHtvYaOE6Wk6B8e114McRJwFRjGvYdJwLXQH5c9osME/AA==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.11.tgz", + "integrity": "sha512-YupOpot5hJO0maupJXixi6l5ETdrITxeo5eBOeuV7RSKgYdU3G5cxO49/9WRnJq9EMrB7AuTSLH/bqOsXi7wPA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.11", + "@typescript-eslint/visitor-keys": "5.59.11", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.59.11.tgz", + "integrity": "sha512-didu2rHSOMUdJThLk4aZ1Or8IcO3HzCw/ZvEjTTIfjIrcdd5cvSIwwDy2AOlE7htSNp7QIZ10fLMyRCveesMLg==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.59.11", + "@typescript-eslint/types": "5.59.11", + "@typescript-eslint/typescript-estree": "5.59.11", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.11.tgz", + "integrity": "sha512-KGYniTGG3AMTuKF9QBD7EIrvufkB6O6uX3knP73xbKLMpH+QRPcgnCxjWXSHjMRuOxFLovljqQgQpR0c7GvjoA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.11", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@vitejs/plugin-react-swc": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.3.2.tgz", + "integrity": "sha512-VJFWY5sfoZerQRvJrh518h3AcQt6f/yTuWn4/TRB+dqmYU0NX1qz7qM5Wfd+gOQqUzQW4gxKqKN3KpE/P3+zrA==", + "dev": true, + "dependencies": { + "@swc/core": "^1.3.61" + }, + "peerDependencies": { + "vite": "^4" + } + }, + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": 
"sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/aria-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.3.tgz", + "integrity": "sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-hidden/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/callsites": { 
+ "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" + }, + "node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/codemirror": { + "version": "5.65.14", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.14.tgz", + "integrity": "sha512-VSNugIBDGt0OU9gDjeVr6fNkoFQznrWEUdAApMlXQNbfE8gGO19776D6MwSqF/V/w/sDwonsQ0z7KmmI9guScg==" + }, + "node_modules/codemirror-graphql": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.9.tgz", + "integrity": "sha512-gl1LR6XSBgZtl7Dr2q4jjRNfhxMF8vn+rnjZTZPf/l+VrQgavY8l3G//hW7s3hWy73iiqkq5LZ4KE1tdaxB/vQ==", + "dependencies": { + "graphql-language-service": "5.1.7" + }, + "peerDependencies": { + "@codemirror/language": "6.0.0", + "codemirror": "^5.65.3", + "graphql": "^15.5.0 || ^16.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/copy-to-clipboard": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz", + "integrity": "sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==", + "dependencies": { + "toggle-selection": "^1.0.6" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } 
+ }, + "node_modules/csstype": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", + "devOptional": true + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/entities": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", + "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/esbuild": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.19.tgz", + "integrity": "sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": "0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + "@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + "@esbuild/win32-arm64": "0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.45.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.45.0.tgz", + "integrity": "sha512-pd8KSxiQpdYRfYa9Wufvdoct3ZPQQuVuU5O6scNgMuOMYuxvH0IGaYK0wUFjo4UYYQQCUndlXiMbnxopwvvTiw==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.4.0", + "@eslint/eslintrc": "^2.1.0", + "@eslint/js": "8.44.0", + "@humanwhocodes/config-array": "^0.11.10", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.0", + "eslint-visitor-keys": "^3.4.1", + "espree": "^9.6.0", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", + "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "dev": true, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.3.5.tgz", + "integrity": "sha512-61qNIsc7fo9Pp/mju0J83kzvLm0Bsayu7OQSLEoJxLDCBjIIyb87bkzufoOvdDxLkSlMfkF7UxomC4+eztUBSA==", + "dev": true, + "peerDependencies": { + "eslint": ">=7" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz", + "integrity": "sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/eslint-scope": { + "version": "7.2.0", + "resolved": 
"https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.0.tgz", + "integrity": "sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.2.12", + "resolved": 
"https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", + "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-patch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/fast-json-patch/-/fast-json-patch-3.1.1.tgz", + "integrity": "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", + "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", + "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "dev": true, + "dependencies": { + "flatted": "^3.1.0", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.2.7", + 
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", + "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "dev": true + }, + "node_modules/framer-motion": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-6.5.1.tgz", + "integrity": "sha512-o1BGqqposwi7cgDrtg0dNONhkmPsUFDaLcKXigzuTFC5x58mE8iyTazxSudFzmT6MEyJKfjjU8ItoMe3W+3fiw==", + "dependencies": { + "@motionone/dom": "10.12.0", + "framesync": "6.0.1", + "hey-listen": "^1.0.8", + "popmotion": "11.0.3", + "style-value-types": "5.0.0", + "tslib": "^2.1.0" + }, + "optionalDependencies": { + "@emotion/is-prop-valid": "^0.8.2" + }, + "peerDependencies": { + "react": ">=16.8 || ^17.0.0 || ^18.0.0", + "react-dom": ">=16.8 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/framer-motion/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/framesync": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/framesync/-/framesync-6.0.1.tgz", + "integrity": "sha512-fUY88kXvGiIItgNC7wcTOl0SNRCVXMKSWW2Yzfmn7EKNc+MpCzcz9DhdHcdjbrtN3c6R4H5dTY2jiCpPdysEjA==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/framesync/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "13.20.0", + "resolved": 
"https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", + "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/grapheme-splitter": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", + "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", + "dev": true + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/graphiql": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.0.4.tgz", + "integrity": "sha512-5NVEG1I8CkpHtZEauvHnU4yoVPjktTHiSMsxXCMwEB6OMkvSg71Fix1MtTc1k/8HnJUTomIDLodRAiRM3Hu+dQ==", + "dependencies": { + "@graphiql/react": "^0.19.2", + "@graphiql/toolkit": "^0.9.1", + "graphql-language-service": "^5.1.7", + "markdown-it": "^12.2.0" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0", + "react": "^16.8.0 || ^17 || ^18", + "react-dom": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/graphql": { + "version": "16.7.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.7.1.tgz", + "integrity": "sha512-DRYR9tf+UGU0KOsMcKAlXeFfX89UiiIZ0dRU3mR0yJfu6OjZqUcp68NnFLnqQU5RexygFoDy1EW+ccOYcPfmHg==", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/graphql-language-service": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.1.7.tgz", + "integrity": "sha512-xkawYMJeoNYGhT+SpSH3c2qf6HpGHQ/duDmrseVHBpVCrXAiGnliXGSCC4jyMGgZQ05GytsZ12p0nUo7s6lSSw==", + "dependencies": { + "nullthrows": "^1.0.0", + "vscode-languageserver-types": "^3.17.1" + }, + "bin": { + "graphql": "dist/temp-bin.js" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/hey-listen": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/hey-listen/-/hey-listen-1.0.8.tgz", + "integrity": "sha512-COpmrF2NOg4TBWUJ5UVyaCU2A88wEMkUPK4hNqyCkqHbxT92BbvfjoSozkAIIm6XhicGlJHhFdullInrdhwU8Q==" + }, + "node_modules/ignore": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", 
+ "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-primitive": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-3.0.1.tgz", + "integrity": 
"sha512-GljRxhWvlCNRfZyORiH77FwdFwGcMO620o37EOYC0ORWdq+WYNVqW0w2Juzew4M+L81l6/QS3t5gkkihyRqv9w==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/linkify-it": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.3.tgz", + "integrity": "sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==", + "dependencies": { + "uc.micro": "^1.0.1" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": 
"^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/markdown-it": { + "version": "12.3.2", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.3.2.tgz", + "integrity": "sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==", + "dependencies": { + "argparse": "^2.0.1", + "entities": "~2.1.0", + "linkify-it": "^3.0.1", + "mdurl": "^1.0.1", + "uc.micro": "^1.0.5" + }, + "bin": { + "markdown-it": "bin/markdown-it.js" + } + }, + "node_modules/mdurl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", + "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/meros": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/meros/-/meros-1.3.0.tgz", + "integrity": "sha512-2BNGOimxEz5hmjUG2FwoxCt5HN7BXdaWyFqEwxPTrJzVdABtrL4TiHTcsWSFAxPQ/tOnEaQEJh3qWq71QRMY+w==", + "engines": { + "node": ">=13" + }, + "peerDependencies": { + "@types/node": ">=13" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/natural-compare-lite": { + "version": "1.4.0", + 
"resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", + "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", + "dev": true + }, + "node_modules/nullthrows": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/nullthrows/-/nullthrows-1.1.1.tgz", + "integrity": "sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw==" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": 
"sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/popmotion": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/popmotion/-/popmotion-11.0.3.tgz", + "integrity": "sha512-Y55FLdj3UxkR7Vl3s7Qr4e9m0onSnP8W7d/xQLsoJM40vs6UKHFdygs6SWryasTZYqugMjm3BepCF4CWXDiHgA==", + "dependencies": { + "framesync": "6.0.1", + "hey-listen": "^1.0.8", + "style-value-types": "5.0.0", + "tslib": "^2.1.0" + } + }, + "node_modules/popmotion/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/postcss": { + "version": "8.4.24", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", + "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-hook-form": { + "version": "7.45.2", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.45.2.tgz", + "integrity": "sha512-9s45OdTaKN+4NSTbXVqeDITd/nwIg++nxJGL8+OD5uf1DxvhsXQ641kaYHk5K28cpIOTYm71O/fYk7rFaygb3A==", + "engines": { + "node": ">=12.22.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/react-hook-form" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.5.5", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", + "integrity": "sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==", + "dependencies": { + "react-remove-scroll-bar": "^2.3.3", + "react-style-singleton": "^2.2.1", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.0", + "use-sidecar": "^1.1.2" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.4.tgz", + "integrity": "sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==", + "dependencies": { + "react-style-singleton": "^2.2.1", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/react-remove-scroll/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/react-style-singleton": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", + "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "dependencies": { + "get-nonce": "^1.0.0", + "invariant": "^2.2.4", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": 
"sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.25.1.tgz", + "integrity": "sha512-tywOR+rwIt5m2ZAWSe5AIJcTat8vGlnPFAv15ycCrw33t6iFsXZ6mzHVFh2psSjxQPmI+xgzMZZizUAukBI4aQ==", + "dev": true, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=14.18.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.5.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", + "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-value": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-4.1.0.tgz", + "integrity": "sha512-zTEg4HL0RwVrqcWs3ztF+x1vkxfm0lP+MQQFPiMJTKVceBwEV0A569Ou8l9IYQG8jOZdMVI1hGsc0tmeD2o/Lw==", + "funding": [ + "https://github.com/sponsors/jonschlinkert", + "https://paypal.me/jonathanschlinkert", + "https://jonschlinkert.dev/sponsor" + ], + "dependencies": { + "is-plain-object": "^2.0.4", + "is-primitive": "^3.0.1" + }, + "engines": { 
+ "node": ">=11.0" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-mod": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.3.tgz", + "integrity": "sha512-78Jv8kYJdjbvRwwijtCevYADfsI0lGzYJe4mMFdceO8l75DFFDoqBhR1jVDicDRRaX4//g1u9wKeo+ztc2h1Rw==", + "peer": true + }, + "node_modules/style-value-types": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/style-value-types/-/style-value-types-5.0.0.tgz", + "integrity": "sha512-08yq36Ikn4kx4YU6RD7jWEv27v4V+PUsOGa4n/as8Et3CuODMJQ00ENeAVXAeydX4Z2j1XHZF1K2sX4mGl18fA==", + "dependencies": { + "hey-listen": "^1.0.8", + "tslib": "^2.1.0" + } + }, + "node_modules/style-value-types/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + 
"node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toggle-selection": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz", + "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ==" + }, + "node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "dev": true + }, + "node_modules/tsutils": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", + "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", + "dev": true, + "dependencies": { + "tslib": "^1.8.1" + }, + "engines": { + "node": ">= 6" + }, + "peerDependencies": { + "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.3.tgz", + "integrity": "sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uc.micro": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", + "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.0.tgz", + "integrity": "sha512-3FT9PRuRdbB9HfXhEq35u4oZkvpJ5kuYbpqhCfmiZyReuRgpnhDlbr2ZEnnuS0RrJAPn6l23xjFg9kpDM+Ms7w==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + 
"@types/react": { + "optional": true + } + } + }, + "node_modules/use-callback-ref/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/use-sidecar": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", + "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/use-sync-external-store": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", + "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/vite": { + "version": "4.3.9", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.3.9.tgz", + "integrity": "sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==", + "dev": true, + "dependencies": { + "esbuild": "^0.17.5", + "postcss": "^8.4.23", + "rollup": "^3.21.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@types/node": ">= 14", + "less": "*", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vscode-languageserver-types": { + "version": "3.17.3", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.3.tgz", + "integrity": "sha512-SYU4z1dL0PyIMd4Vj8YOqFvHu7Hz/enbWtpfnVbJHU4Nd1YNYx8u0ennumc6h48GQNeOLxmwySmnADouT/AuZA==" + }, + "node_modules/w3c-keyname": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", + "peer": true + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": 
"sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/playground/package.json b/playground/package.json new file mode 100644 index 0000000000..184ec188a0 --- /dev/null +++ b/playground/package.json @@ -0,0 +1,33 @@ +{ + "name": "playground", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "lint": "eslint src --ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview" + }, + "dependencies": { + "@tanstack/react-query": "^4.32.0", + "fast-json-patch": "^3.1.1", + "graphiql": "^3.0.4", + "graphql": "^16.7.1", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-hook-form": "^7.45.2" + }, + "devDependencies": { + "@types/react": "^18.2.15", + "@types/react-dom": "^18.2.7", + "@typescript-eslint/eslint-plugin": "^5.59.0", + "@typescript-eslint/parser": "^5.62.0", + "@vitejs/plugin-react-swc": "^3.0.0", + "eslint": "^8.45.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.3.4", + "typescript": "^5.0.2", + "vite": "^4.3.9" + } +} diff --git a/playground/playground.go b/playground/playground.go new file mode 100644 index 0000000000..6894d339b8 --- /dev/null +++ b/playground/playground.go @@ -0,0 +1,20 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +//go:build playground + +package playground + +import ( + "embed" +) + +//go:embed dist +var Dist embed.FS diff --git a/playground/src/App.tsx b/playground/src/App.tsx new file mode 100644 index 0000000000..dc00b98cbc --- /dev/null +++ b/playground/src/App.tsx @@ -0,0 +1,35 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { GraphiQL } from 'graphiql' +import { createGraphiQLFetcher } from '@graphiql/toolkit' +import { GraphiQLPlugin } from '@graphiql/react' +import { QueryClient, QueryClientProvider } from '@tanstack/react-query' +import { Plugin } from './components/Plugin' +import 'graphiql/graphiql.css' + +const client = new QueryClient() +const fetcher = createGraphiQLFetcher({ url: 'http://localhost:9181/api/v0/graphql' }) + +const plugin: GraphiQLPlugin = { + title: 'DefraDB', + icon: () => (
+      <div>DB</div>
+    ),
+    content: () => (<Plugin />),
+}
+
+function App() {
+  return (
+    <QueryClientProvider client={client}>
+      <GraphiQL fetcher={fetcher} plugins={[plugin]} />
+    </QueryClientProvider>
+  )
+}
+
+export default App
diff --git a/playground/src/components/Plugin.tsx b/playground/src/components/Plugin.tsx
new file mode 100644
index 0000000000..e8c727fe61
--- /dev/null
+++ b/playground/src/components/Plugin.tsx
@@ -0,0 +1,57 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+import { useQuery } from '@tanstack/react-query'
+import { SchemaLoadForm } from './SchemaLoadForm'
+import { SchemaPatchForm } from './SchemaPatchForm'
+import { listSchema } from '../lib/api'
+
+const defaultFieldTypes = [
+  'ID',
+  'Boolean',
+  '[Boolean]',
+  '[Boolean!]',
+  'Int',
+  '[Int]',
+  '[Int!]',
+  'DateTime',
+  'Float',
+  '[Float]',
+  '[Float!]',
+  'String',
+  '[String]',
+  '[String!]',
+]
+
+export function Plugin() {
+  const { data } = useQuery({ queryKey: ['schemas'], queryFn: listSchema })
+
+  const collections = data?.data?.collections ?? []
+  const schemaFieldTypes = collections.map(col => [`${col.name}`, `[${col.name}]`]).flat()
+  const fieldTypes = [...defaultFieldTypes, ...schemaFieldTypes]
+
+  return (
+    {/* note: JSX tags below reconstructed from stripped markup; original attributes and props were lost in extraction */}
+    <div>
+      <div>
+        DefraDB
+      </div>
+
+      <div>
+        <div>
+          <div>
+            Add Schema
+          </div>
+          <SchemaLoadForm />
+        </div>
+        { collections?.map((schema) =>
+          <div key={schema.name}>
+            <div>
+              {schema.name} Schema
+            </div>
+            <SchemaPatchForm schema={schema} fieldTypes={fieldTypes} />
+          </div>
+        )}
+      </div>
+    </div>
+  )
+}
\ No newline at end of file
diff --git a/playground/src/components/SchemaLoadForm.tsx b/playground/src/components/SchemaLoadForm.tsx
new file mode 100644
index 0000000000..a1df44d87c
--- /dev/null
+++ b/playground/src/components/SchemaLoadForm.tsx
@@ -0,0 +1,81 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+import { useState, useEffect } from 'react'
+import { useForm } from 'react-hook-form'
+import { useSchemaContext } from '@graphiql/react'
+import { useQueryClient } from '@tanstack/react-query'
+import { loadSchema, ErrorItem } from '../lib/api'
+
+export type FormData = {
+  schema: string
+}
+
+const defaultValues: FormData = {
+  schema: '',
+}
+
+export function SchemaLoadForm() {
+  const queryClient = useQueryClient()
+  const schemaContext = useSchemaContext({ nonNull: true })
+
+  const { formState, reset, register, handleSubmit } = useForm<FormData>({ defaultValues })
+
+  const [errors, setErrors] = useState<ErrorItem[]>()
+  const [isLoading, setIsLoading] = useState(false)
+
+  useEffect(() => {
+    if (formState.isSubmitSuccessful) reset(defaultValues)
+  }, [formState, reset])
+
+  const onSubmit = async (data: FormData) => {
+    setErrors(undefined)
+    setIsLoading(true)
+
+    try {
+      const res = await loadSchema(data.schema)
+      if (res.errors) {
+        setErrors(res.errors)
+      } else {
+        schemaContext.introspect()
+        queryClient.invalidateQueries(['schemas'])
+      }
+    } catch(err: any) {
+      setErrors([{ message: err.message }])
+    } finally {
+      setIsLoading(false)
+    }
+  }
+
+  return (
+    {/* note: form markup reconstructed; original attributes were lost in extraction */}
+    <form onSubmit={handleSubmit(onSubmit)}>
+      {errors?.map((error, index) =>
+        <div key={index}>
+          {error.message}
+        </div>
+      )}